// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *          Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>
#include <linux/nmi.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT_1	8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_1	(1 << RCUTORTURE_RDR_SHIFT_1)
#define RCUTORTURE_RDR_SHIFT_2	9	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_2	(1 << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_BH	0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU_1	0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_RCU_2	0x40	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	7	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	\
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)

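/*
 * Purely illustrative example of how these bits compose: a reader segment
 * running under rcu_read_lock() with both bh and preemption also disabled
 * would be described by
 *	RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_BH | RCUTORTURE_RDR_PREEMPT
 * which evaluates to 0x25.  The _SHIFT_1/_SHIFT_2 bits above the low byte
 * carry SRCU indexes rather than extension flags.
 */
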
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(int, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, read_exit_delay, 13,
	      "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16,
	      "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false,
	      "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0,
	      "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

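/*
 * For reference (see Documentation/RCU/torture.rst for the authoritative
 * description): the parameters above are ordinary module parameters, so
 * they are typically supplied either at module-load time, for example
 *	modprobe rcutorture torture_type=srcu nreaders=8
 * or, when rcutorture is built in, on the kernel command line as
 *	rcutorture.torture_type=srcu rcutorture.nreaders=8
 */
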
static int nrealnocbers;
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct **nocb_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **fwd_prog_tasks;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;

#define RCU_TORTURE_PIPE_LEN 10

// Mailbox-like structure to check RCU global memory ordering.
struct rcu_torture_reader_check {
	unsigned long rtc_myloops;
	int rtc_chkrdr;
	unsigned long rtc_chkloops;
	int rtc_ready;
	struct rcu_torture_reader_check *rtc_assigner;
} ____cacheline_internodealigned_in_smp;

// Update-side data structure used to check RCU readers.
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
	struct rcu_torture_reader_check *rtort_chkp;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_mbchk_fail;
static atomic_t n_rcu_torture_mbchk_tries;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;
static atomic_long_t n_nocb_offload;
static atomic_long_t n_nocb_deoffload;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_POLL_GET		7
#define RTWS_POLL_WAIT		8
#define RTWS_SYNC		9
#define RTWS_STUTTER		10
#define RTWS_STOPPING		11
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_POLL_GET",
	"RTWS_POLL_WAIT",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/* and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Shorten rcu_read_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 322 | /* |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 323 | * Operations vector for selecting different types of tests. |
| 324 | */ |
| 325 | |
| 326 | struct rcu_torture_ops { |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 327 | int ttype; |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 328 | void (*init)(void); |
Paul E. McKenney | ca1d51e | 2015-04-14 12:28:22 -0700 | [diff] [blame] | 329 | void (*cleanup)(void); |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 330 | int (*readlock)(void); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 331 | void (*read_delay)(struct torture_random_state *rrsp, |
| 332 | struct rt_read_seg *rtrsp); |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 333 | void (*readunlock)(int idx); |
Paul E. McKenney | a5c095e | 2021-03-13 20:05:31 -0800 | [diff] [blame] | 334 | int (*readlock_held)(void); |
Paul E. McKenney | 17ef2fe | 2018-04-27 11:39:34 -0700 | [diff] [blame] | 335 | unsigned long (*get_gp_seq)(void); |
Paul E. McKenney | d7219312 | 2018-05-15 15:24:41 -0700 | [diff] [blame] | 336 | unsigned long (*gp_diff)(unsigned long new, unsigned long old); |
Paul E. McKenney | 0acc512 | 2009-06-25 09:08:17 -0700 | [diff] [blame] | 337 | void (*deferred_free)(struct rcu_torture *p); |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 338 | void (*sync)(void); |
Paul E. McKenney | 2ec1f2d | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 339 | void (*exp_sync)(void); |
Paul E. McKenney | fd56f64b | 2020-11-13 20:14:27 -0800 | [diff] [blame] | 340 | unsigned long (*get_gp_state)(void); |
Paul E. McKenney | 0fd0548 | 2020-11-13 20:43:59 -0800 | [diff] [blame] | 341 | unsigned long (*start_gp_poll)(void); |
| 342 | bool (*poll_gp_state)(unsigned long oldstate); |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 343 | void (*cond_sync)(unsigned long oldstate); |
Boqun Feng | db3e8db | 2015-07-29 13:29:39 +0800 | [diff] [blame] | 344 | call_rcu_func_t call; |
Paul E. McKenney | 2326974 | 2008-05-12 21:21:05 +0200 | [diff] [blame] | 345 | void (*cb_barrier)(void); |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 346 | void (*fqs)(void); |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 347 | void (*stats)(void); |
Paul E. McKenney | 27c0f14 | 2020-09-15 17:08:03 -0700 | [diff] [blame] | 348 | void (*gp_kthread_dbg)(void); |
Paul E. McKenney | 0260b92 | 2021-04-08 13:01:14 -0700 | [diff] [blame] | 349 | bool (*check_boost_failed)(unsigned long gp_state, int *cpup); |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 350 | int (*stall_dur)(void); |
Paul E. McKenney | 613b00f | 2021-11-23 11:53:52 -0800 | [diff] [blame] | 351 | long cbflood_max; |
Paul E. McKenney | 0acc512 | 2009-06-25 09:08:17 -0700 | [diff] [blame] | 352 | int irq_capable; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 353 | int can_boost; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 354 | int extendables; |
Paul E. McKenney | 5eabea5 | 2019-04-12 09:02:46 -0700 | [diff] [blame] | 355 | int slow_gps; |
Paul E. McKenney | 340170f | 2021-09-24 21:30:26 -0700 | [diff] [blame] | 356 | int no_pi_lock; |
Steven Rostedt (Red Hat) | e66c33d | 2013-07-12 16:50:28 -0400 | [diff] [blame] | 357 | const char *name; |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 358 | }; |
Paul E. McKenney | a71fca5 | 2009-09-18 10:28:19 -0700 | [diff] [blame] | 359 | |
| 360 | static struct rcu_torture_ops *cur_ops; |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 361 | |
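/*
 * Broadly speaking, the torture_type module parameter selects which of the
 * rcu_torture_ops instances defined below (rcu_ops, srcu_ops, tasks_ops,
 * and friends) is installed as cur_ops at init time, after which the test
 * drives the chosen flavor only through these function pointers, roughly
 * along the lines of:
 *
 *	idx = cur_ops->readlock();
 *	cur_ops->read_delay(&rand, &rtseg);	// rand/rtseg: caller-owned state
 *	cur_ops->readunlock(idx);
 */
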
| 362 | /* |
| 363 | * Definitions for rcu torture testing. |
| 364 | */ |
| 365 | |
Paul E. McKenney | a5c095e | 2021-03-13 20:05:31 -0800 | [diff] [blame] | 366 | static int torture_readlock_not_held(void) |
| 367 | { |
| 368 | return rcu_read_lock_bh_held() || rcu_read_lock_sched_held(); |
| 369 | } |
| 370 | |
Josh Triplett | a49a4af | 2006-09-29 01:59:30 -0700 | [diff] [blame] | 371 | static int rcu_torture_read_lock(void) __acquires(RCU) |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 372 | { |
| 373 | rcu_read_lock(); |
| 374 | return 0; |
| 375 | } |
| 376 | |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 377 | static void |
| 378 | rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) |
Paul E. McKenney | b2896d2 | 2006-10-04 02:17:03 -0700 | [diff] [blame] | 379 | { |
Paul E. McKenney | d0af39e | 2016-10-10 18:26:04 -0700 | [diff] [blame] | 380 | unsigned long started; |
| 381 | unsigned long completed; |
Josh Triplett | b8d57a7 | 2009-09-08 15:54:35 -0700 | [diff] [blame] | 382 | const unsigned long shortdelay_us = 200; |
Paul E. McKenney | 1e69676 | 2018-07-20 12:04:12 -0700 | [diff] [blame] | 383 | unsigned long longdelay_ms = 300; |
Paul E. McKenney | d0af39e | 2016-10-10 18:26:04 -0700 | [diff] [blame] | 384 | unsigned long long ts; |
Paul E. McKenney | b2896d2 | 2006-10-04 02:17:03 -0700 | [diff] [blame] | 385 | |
Josh Triplett | b8d57a7 | 2009-09-08 15:54:35 -0700 | [diff] [blame] | 386 | /* We want a short delay sometimes to make a reader delay the grace |
| 387 | * period, and we want a long delay occasionally to trigger |
| 388 | * force_quiescent_state. */ |
Paul E. McKenney | b2896d2 | 2006-10-04 02:17:03 -0700 | [diff] [blame] | 389 | |
Paul E. McKenney | 102c14d | 2019-12-21 11:23:50 -0800 | [diff] [blame] | 390 | if (!READ_ONCE(rcu_fwd_cb_nodelay) && |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 391 | !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) { |
Paul E. McKenney | 17ef2fe | 2018-04-27 11:39:34 -0700 | [diff] [blame] | 392 | started = cur_ops->get_gp_seq(); |
Paul E. McKenney | d0af39e | 2016-10-10 18:26:04 -0700 | [diff] [blame] | 393 | ts = rcu_trace_clock_local(); |
Paul E. McKenney | 1e69676 | 2018-07-20 12:04:12 -0700 | [diff] [blame] | 394 | if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK)) |
| 395 | longdelay_ms = 5; /* Avoid triggering BH limits. */ |
Josh Triplett | b8d57a7 | 2009-09-08 15:54:35 -0700 | [diff] [blame] | 396 | mdelay(longdelay_ms); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 397 | rtrsp->rt_delay_ms = longdelay_ms; |
Paul E. McKenney | 17ef2fe | 2018-04-27 11:39:34 -0700 | [diff] [blame] | 398 | completed = cur_ops->get_gp_seq(); |
Paul E. McKenney | d0af39e | 2016-10-10 18:26:04 -0700 | [diff] [blame] | 399 | do_trace_rcu_torture_read(cur_ops->name, NULL, ts, |
| 400 | started, completed); |
| 401 | } |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 402 | if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) { |
Josh Triplett | b8d57a7 | 2009-09-08 15:54:35 -0700 | [diff] [blame] | 403 | udelay(shortdelay_us); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 404 | rtrsp->rt_delay_us = shortdelay_us; |
| 405 | } |
Paul E. McKenney | 51b1130 | 2014-01-27 11:49:39 -0800 | [diff] [blame] | 406 | if (!preempt_count() && |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 407 | !(torture_random(rrsp) % (nrealreaders * 500))) { |
Paul E. McKenney | cc1321c | 2017-10-16 11:05:03 -0700 | [diff] [blame] | 408 | torture_preempt_schedule(); /* QS only if preemptible. */ |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 409 | rtrsp->rt_preempted = true; |
| 410 | } |
Paul E. McKenney | b2896d2 | 2006-10-04 02:17:03 -0700 | [diff] [blame] | 411 | } |
| 412 | |
static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;
	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

	if (rtrcp) {
		WRITE_ONCE(rp->rtort_chkp, NULL);
		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
	}
	i = READ_ONCE(rp->rtort_pipe_count);
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

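/*
 * Roughly speaking, each element retired by the writer ages through the
 * "pipe": rtort_pipe_count is incremented once per grace period after the
 * element ceases to be the reader-visible current element, and the element
 * is returned to the free pool only after RCU_TORTURE_PIPE_LEN such
 * increments.  A reader still holding a reference to an element whose count
 * has advanced well past the first stage has therefore seen a grace period
 * complete inside its read-side critical section, which is exactly the kind
 * of bug this test exists to catch.
 */
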
static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype			= RCU_FLAVOR,
	.init			= rcu_sync_torture_init,
	.readlock		= rcu_torture_read_lock,
	.read_delay		= rcu_read_delay,
	.readunlock		= rcu_torture_read_unlock,
	.readlock_held		= torture_readlock_not_held,
	.get_gp_seq		= rcu_get_gp_seq,
	.gp_diff		= rcu_seq_diff,
	.deferred_free		= rcu_torture_deferred_free,
	.sync			= synchronize_rcu,
	.exp_sync		= synchronize_rcu_expedited,
	.get_gp_state		= get_state_synchronize_rcu,
	.start_gp_poll		= start_poll_synchronize_rcu,
	.poll_gp_state		= poll_state_synchronize_rcu,
	.cond_sync		= cond_synchronize_rcu,
	.call			= call_rcu,
	.cb_barrier		= rcu_barrier,
	.fqs			= rcu_force_quiescent_state,
	.stats			= NULL,
	.gp_kthread_dbg		= show_rcu_gp_kthreads,
	.check_boost_failed	= rcu_check_boost_fail,
	.stall_dur		= rcu_jiffies_till_stall_check,
	.irq_capable		= 1,
	.can_boost		= IS_ENABLED(CONFIG_RCU_BOOST),
	.extendables		= RCUTORTURE_MAX_EXTEND,
	.name			= "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
| 530 | static void rcu_busted_torture_deferred_free(struct rcu_torture *p) |
| 531 | { |
| 532 | /* This is a deliberate bug for testing purposes only! */ |
| 533 | rcu_torture_cb(&p->rtort_rcu); |
| 534 | } |
| 535 | |
| 536 | static void synchronize_rcu_busted(void) |
| 537 | { |
| 538 | /* This is a deliberate bug for testing purposes only! */ |
| 539 | } |
| 540 | |
| 541 | static void |
Boqun Feng | b6a4ae7 | 2015-07-29 13:29:38 +0800 | [diff] [blame] | 542 | call_rcu_busted(struct rcu_head *head, rcu_callback_t func) |
Paul E. McKenney | ff20e25 | 2014-02-06 08:45:56 -0800 | [diff] [blame] | 543 | { |
| 544 | /* This is a deliberate bug for testing purposes only! */ |
| 545 | func(head); |
| 546 | } |
| 547 | |
| 548 | static struct rcu_torture_ops rcu_busted_ops = { |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 549 | .ttype = INVALID_RCU_FLAVOR, |
Paul E. McKenney | ff20e25 | 2014-02-06 08:45:56 -0800 | [diff] [blame] | 550 | .init = rcu_sync_torture_init, |
| 551 | .readlock = rcu_torture_read_lock, |
| 552 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ |
| 553 | .readunlock = rcu_torture_read_unlock, |
Paul E. McKenney | a5c095e | 2021-03-13 20:05:31 -0800 | [diff] [blame] | 554 | .readlock_held = torture_readlock_not_held, |
Paul E. McKenney | 17ef2fe | 2018-04-27 11:39:34 -0700 | [diff] [blame] | 555 | .get_gp_seq = rcu_no_completed, |
Paul E. McKenney | ff20e25 | 2014-02-06 08:45:56 -0800 | [diff] [blame] | 556 | .deferred_free = rcu_busted_torture_deferred_free, |
| 557 | .sync = synchronize_rcu_busted, |
| 558 | .exp_sync = synchronize_rcu_busted, |
| 559 | .call = call_rcu_busted, |
| 560 | .cb_barrier = NULL, |
| 561 | .fqs = NULL, |
| 562 | .stats = NULL, |
| 563 | .irq_capable = 1, |
Paul E. McKenney | b3c9831 | 2017-06-06 16:39:00 -0700 | [diff] [blame] | 564 | .name = "busted" |
Paul E. McKenney | ff20e25 | 2014-02-06 08:45:56 -0800 | [diff] [blame] | 565 | }; |
| 566 | |
/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static int torture_srcu_read_lock_held(void)
{
	return srcu_read_lock_held(srcu_ctlp);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_get_gp_state(void)
{
	return get_state_synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_start_gp_poll(void)
{
	return start_poll_synchronize_srcu(srcu_ctlp);
}

static bool srcu_torture_poll_gp_state(unsigned long oldstate)
{
	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.get_gp_state	= srcu_torture_get_gp_state,
	.start_gp_poll	= srcu_torture_start_gp_poll,
	.poll_gp_state	= srcu_torture_poll_gp_state,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.name		= "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.name		= "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_mult_test,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.gp_kthread_dbg	= show_rcu_tasks_classic_gp_kthread,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

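/*
 * The reasoning, loosely stated: the "trivial" readers below protect
 * themselves only by disabling preemption, so on a CONFIG_PREEMPT=n kernel,
 * forcing this task to run on each online CPU in turn ensures that every
 * CPU has context-switched away from any pre-existing reader, which is what
 * lets the affinity loop above stand in for a grace period.
 */
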
static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype		= RCU_TRIVIAL_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.readlock_held	= torture_readlock_not_held,
	.get_gp_seq	= rcu_no_completed,
	.sync		= synchronize_rcu_trivial,
	.exp_sync	= synchronize_rcu_trivial,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "trivial"
};

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype		= RCU_TASKS_RUDE_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_rude_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_rude,
	.exp_sync	= synchronize_rcu_tasks_rude,
	.call		= call_rcu_tasks_rude,
	.cb_barrier	= rcu_barrier_tasks_rude,
	.gp_kthread_dbg	= show_rcu_tasks_rude_gp_kthread,
	.cbflood_max	= 50000,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks-rude"
};

Paul E. McKenney | c1a76c0 | 2020-03-10 10:32:30 -0700 | [diff] [blame] | 852 | /* |
| 853 | * Definitions for tracing RCU-tasks torture testing. |
| 854 | */ |
| 855 | |
| 856 | static int tasks_tracing_torture_read_lock(void) |
| 857 | { |
| 858 | rcu_read_lock_trace(); |
| 859 | return 0; |
| 860 | } |
| 861 | |
| 862 | static void tasks_tracing_torture_read_unlock(int idx) |
| 863 | { |
| 864 | rcu_read_unlock_trace(); |
| 865 | } |
| 866 | |
| 867 | static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p) |
| 868 | { |
| 869 | call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb); |
| 870 | } |
| 871 | |
| 872 | static struct rcu_torture_ops tasks_tracing_ops = { |
| 873 | .ttype = RCU_TASKS_TRACING_FLAVOR, |
| 874 | .init = rcu_sync_torture_init, |
| 875 | .readlock = tasks_tracing_torture_read_lock, |
| 876 | .read_delay = srcu_read_delay, /* just reuse srcu's version. */ |
| 877 | .readunlock = tasks_tracing_torture_read_unlock, |
Paul E. McKenney | a5c095e | 2021-03-13 20:05:31 -0800 | [diff] [blame] | 878 | .readlock_held = rcu_read_lock_trace_held, |
Paul E. McKenney | c1a76c0 | 2020-03-10 10:32:30 -0700 | [diff] [blame] | 879 | .get_gp_seq = rcu_no_completed, |
| 880 | .deferred_free = rcu_tasks_tracing_torture_deferred_free, |
| 881 | .sync = synchronize_rcu_tasks_trace, |
| 882 | .exp_sync = synchronize_rcu_tasks_trace, |
| 883 | .call = call_rcu_tasks_trace, |
| 884 | .cb_barrier = rcu_barrier_tasks_trace, |
Paul E. McKenney | 27c0f14 | 2020-09-15 17:08:03 -0700 | [diff] [blame] | 885 | .gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread, |
Paul E. McKenney | 613b00f | 2021-11-23 11:53:52 -0800 | [diff] [blame] | 886 | .cbflood_max = 50000, |
Paul E. McKenney | c1a76c0 | 2020-03-10 10:32:30 -0700 | [diff] [blame] | 887 | .fqs = NULL, |
| 888 | .stats = NULL, |
| 889 | .irq_capable = 1, |
| 890 | .slow_gps = 1, |
| 891 | .name = "tasks-tracing" |
| 892 | }; |
| 893 | |
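| | /*
| |  * Compute the difference between two grace-period sequence numbers,
| |  * deferring to the flavor-specific ->gp_diff function if one is provided.
| |  */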
Paul E. McKenney | d7219312 | 2018-05-15 15:24:41 -0700 | [diff] [blame] | 894 | static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old) |
| 895 | { |
| 896 | if (!cur_ops->gp_diff) |
| 897 | return new - old; |
| 898 | return cur_ops->gp_diff(new, old); |
| 899 | } |
| 900 | |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 901 | /* |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 902 | * RCU torture priority-boost testing. Runs one real-time thread per |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 903 | * CPU for moderate bursts, repeatedly starting grace periods and waiting |
| 904 | * for them to complete. If a given grace period takes too long, we assume |
| 905 | * that priority inversion has occurred. |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 906 | */ |
| 907 | |
Joel Fernandes (Google) | 450efca | 2018-06-10 16:45:43 -0700 | [diff] [blame] | 908 | static int old_rt_runtime = -1; |
| 909 | |
| 910 | static void rcu_torture_disable_rt_throttle(void) |
| 911 | { |
| 912 | /* |
| 913 | * Disable RT throttling so that rcutorture's boost threads don't get |
| 914 | 	 * throttled. Only possible if rcutorture is built-in; otherwise, the
| 915 | * user should manually do this by setting the sched_rt_period_us and |
| 916 | * sched_rt_runtime sysctls. |
| 917 | */ |
| 918 | if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1) |
| 919 | return; |
| 920 | |
| 921 | old_rt_runtime = sysctl_sched_rt_runtime; |
| 922 | sysctl_sched_rt_runtime = -1; |
| 923 | } |
| 924 | |
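| | /* Restore the RT-throttling value saved by rcu_torture_disable_rt_throttle(). */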
| 925 | static void rcu_torture_enable_rt_throttle(void) |
| 926 | { |
| 927 | if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1) |
| 928 | return; |
| 929 | |
| 930 | sysctl_sched_rt_runtime = old_rt_runtime; |
| 931 | old_rt_runtime = -1; |
| 932 | } |
| 933 | |
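| | /*
| |  * Decide whether the grace period that started at *start has run long enough
| |  * to count as a boost-test failure.  Returns true (and dumps diagnostics once)
| |  * on failure; returns false if the grace period completed in time or if the
| |  * delay can be blamed on a CPU that has not yet supplied a quiescent state,
| |  * in which case *start may be pushed forward.
| |  */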
Paul E. McKenney | 063f5a4 | 2021-04-14 13:00:10 -0700 | [diff] [blame] | 934 | static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start) |
Joel Fernandes (Google) | 3b745c8 | 2018-06-10 16:45:44 -0700 | [diff] [blame] | 935 | { |
Paul E. McKenney | 0260b92 | 2021-04-08 13:01:14 -0700 | [diff] [blame] | 936 | int cpu; |
Paul E. McKenney | 5e59fba | 2021-01-15 13:30:38 -0800 | [diff] [blame] | 937 | static int dbg_done; |
Paul E. McKenney | 063f5a4 | 2021-04-14 13:00:10 -0700 | [diff] [blame] | 938 | unsigned long end = jiffies; |
Paul E. McKenney | bcd4af4 | 2021-04-08 10:46:55 -0700 | [diff] [blame] | 939 | bool gp_done; |
Paul E. McKenney | 0260b92 | 2021-04-08 13:01:14 -0700 | [diff] [blame] | 940 | unsigned long j; |
| 941 | static unsigned long last_persist; |
| 942 | unsigned long lp; |
| 943 | unsigned long mininterval = test_boost_duration * HZ - HZ / 2; |
Paul E. McKenney | 5e59fba | 2021-01-15 13:30:38 -0800 | [diff] [blame] | 944 | |
Paul E. McKenney | 063f5a4 | 2021-04-14 13:00:10 -0700 | [diff] [blame] | 945 | if (end - *start > mininterval) { |
Paul E. McKenney | 7b9dad7 | 2021-04-07 17:09:37 -0700 | [diff] [blame] | 946 | // Recheck after checking time to avoid false positives. |
| 947 | smp_mb(); // Time check before grace-period check. |
| 948 | if (cur_ops->poll_gp_state(gp_state)) |
| 949 | return false; // passed, though perhaps just barely |
Paul E. McKenney | 0260b92 | 2021-04-08 13:01:14 -0700 | [diff] [blame] | 950 | if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) { |
| 951 | // At most one persisted message per boost test. |
| 952 | j = jiffies; |
| 953 | lp = READ_ONCE(last_persist); |
| 954 | if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp) |
| 955 | pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu); |
| 956 | return false; // passed on a technicality |
| 957 | } |
Joel Fernandes (Google) | 3b745c8 | 2018-06-10 16:45:44 -0700 | [diff] [blame] | 958 | VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed"); |
| 959 | n_rcu_torture_boost_failure++; |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 960 | if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) { |
| 961 | pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n", |
Paul E. McKenney | 063f5a4 | 2021-04-14 13:00:10 -0700 | [diff] [blame] | 962 | current->rt_priority, gp_state, end - *start); |
Paul E. McKenney | 5e59fba | 2021-01-15 13:30:38 -0800 | [diff] [blame] | 963 | cur_ops->gp_kthread_dbg(); |
Paul E. McKenney | bcd4af4 | 2021-04-08 10:46:55 -0700 | [diff] [blame] | 964 | // Recheck after print to flag grace period ending during splat. |
| 965 | gp_done = cur_ops->poll_gp_state(gp_state); |
| 966 | pr_info("Boost inversion: GP %lu %s.\n", gp_state, |
| 967 | gp_done ? "ended already" : "still pending"); |
Joel Fernandes (Google) | 3b745c8 | 2018-06-10 16:45:44 -0700 | [diff] [blame] | 968 | |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 969 | } |
Joel Fernandes (Google) | 3b745c8 | 2018-06-10 16:45:44 -0700 | [diff] [blame] | 970 | |
Paul E. McKenney | 7b9dad7 | 2021-04-07 17:09:37 -0700 | [diff] [blame] | 971 | return true; // failed |
Paul E. McKenney | 063f5a4 | 2021-04-14 13:00:10 -0700 | [diff] [blame] | 972 | } else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) { |
| 973 | *start = jiffies; |
Joel Fernandes (Google) | 3b745c8 | 2018-06-10 16:45:44 -0700 | [diff] [blame] | 974 | } |
| 975 | |
Paul E. McKenney | 7b9dad7 | 2021-04-07 17:09:37 -0700 | [diff] [blame] | 976 | return false; // passed |
Joel Fernandes (Google) | 3b745c8 | 2018-06-10 16:45:44 -0700 | [diff] [blame] | 977 | } |
| 978 | |
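| | /*
| |  * Kthread doing the actual boost testing: at RT priority, repeatedly start
| |  * polled grace periods during each test interval and flag a failure if one
| |  * of them takes longer than rcu_torture_boost_failed() allows.
| |  */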
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 979 | static int rcu_torture_boost(void *arg) |
| 980 | { |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 981 | unsigned long endtime; |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 982 | unsigned long gp_state; |
| 983 | unsigned long gp_state_time; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 984 | unsigned long oldstarttime; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 985 | |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 986 | VERBOSE_TOROUT_STRING("rcu_torture_boost started"); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 987 | |
| 988 | /* Set real-time priority. */ |
Peter Zijlstra | 8b70098 | 2020-04-22 13:10:04 +0200 | [diff] [blame] | 989 | sched_set_fifo_low(current); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 990 | |
| 991 | /* Each pass through the following loop does one boost-test cycle. */ |
| 992 | do { |
Paul E. McKenney | 5e59fba | 2021-01-15 13:30:38 -0800 | [diff] [blame] | 993 | bool failed = false; // Test failed already in this test interval |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 994 | bool gp_initiated = false; |
Joel Fernandes (Google) | 3b745c8 | 2018-06-10 16:45:44 -0700 | [diff] [blame] | 995 | |
Joel Fernandes (Google) | 3b745c8 | 2018-06-10 16:45:44 -0700 | [diff] [blame] | 996 | if (kthread_should_stop()) |
| 997 | goto checkwait; |
| 998 | |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 999 | /* Wait for the next test interval. */ |
| 1000 | oldstarttime = boost_starttime; |
Paul E. McKenney | 3c80b40 | 2020-04-10 15:37:12 -0700 | [diff] [blame] | 1001 | while (time_before(jiffies, oldstarttime)) { |
Paul E. McKenney | 0e11c8e | 2013-01-10 16:21:07 -0800 | [diff] [blame] | 1002 | schedule_timeout_interruptible(oldstarttime - jiffies); |
Paul E. McKenney | ab1b788 | 2020-09-22 16:42:42 -0700 | [diff] [blame] | 1003 | if (stutter_wait("rcu_torture_boost")) |
| 1004 | sched_set_fifo_low(current); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 1005 | if (torture_must_stop()) |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1006 | goto checkwait; |
| 1007 | } |
| 1008 | |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 1009 | // Do one boost-test interval. |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1010 | endtime = oldstarttime + test_boost_duration * HZ; |
Paul E. McKenney | 3c80b40 | 2020-04-10 15:37:12 -0700 | [diff] [blame] | 1011 | while (time_before(jiffies, endtime)) { |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 1012 | // Has current GP gone too long? |
| 1013 | if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) |
Paul E. McKenney | 063f5a4 | 2021-04-14 13:00:10 -0700 | [diff] [blame] | 1014 | failed = rcu_torture_boost_failed(gp_state, &gp_state_time); |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 1015 | // If we don't have a grace period in flight, start one. |
| 1016 | if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) { |
| 1017 | gp_state = cur_ops->start_gp_poll(); |
| 1018 | gp_initiated = true; |
| 1019 | gp_state_time = jiffies; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1020 | } |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 1021 | if (stutter_wait("rcu_torture_boost")) { |
Paul E. McKenney | ab1b788 | 2020-09-22 16:42:42 -0700 | [diff] [blame] | 1022 | sched_set_fifo_low(current); |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 1023 | // If the grace period already ended, |
| 1024 | // we don't know when that happened, so |
| 1025 | // start over. |
| 1026 | if (cur_ops->poll_gp_state(gp_state)) |
| 1027 | gp_initiated = false; |
| 1028 | } |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 1029 | if (torture_must_stop()) |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1030 | goto checkwait; |
| 1031 | } |
| 1032 | |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 1033 | // In case the grace period extended beyond the end of the loop. |
| 1034 | if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) |
Paul E. McKenney | 063f5a4 | 2021-04-14 13:00:10 -0700 | [diff] [blame] | 1035 | rcu_torture_boost_failed(gp_state, &gp_state_time); |
Joel Fernandes (Google) | 3b745c8 | 2018-06-10 16:45:44 -0700 | [diff] [blame] | 1036 | |
| 1037 | /* |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1038 | * Set the start time of the next test interval. |
| 1039 | * Yes, this is vulnerable to long delays, but such |
| 1040 | * delays simply cause a false negative for the next |
| 1041 | * interval. Besides, we are running at RT priority, |
| 1042 | * so delays should be relatively rare. |
| 1043 | */ |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 1044 | while (oldstarttime == boost_starttime && !kthread_should_stop()) { |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1045 | if (mutex_trylock(&boost_mutex)) { |
Paul E. McKenney | 8c7ec02 | 2021-04-07 20:00:00 -0700 | [diff] [blame] | 1046 | if (oldstarttime == boost_starttime) { |
| 1047 | boost_starttime = jiffies + test_boost_interval * HZ; |
| 1048 | n_rcu_torture_boosts++; |
| 1049 | } |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1050 | mutex_unlock(&boost_mutex); |
| 1051 | break; |
| 1052 | } |
| 1053 | schedule_timeout_uninterruptible(1); |
| 1054 | } |
| 1055 | |
| 1056 | /* Go do the stutter. */ |
Paul E. McKenney | ab1b788 | 2020-09-22 16:42:42 -0700 | [diff] [blame] | 1057 | checkwait: if (stutter_wait("rcu_torture_boost")) |
| 1058 | sched_set_fifo_low(current); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 1059 | } while (!torture_must_stop()); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1060 | |
| 1061 | /* Clean up and exit. */ |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 1062 | while (!kthread_should_stop()) { |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 1063 | torture_shutdown_absorb("rcu_torture_boost"); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1064 | schedule_timeout_uninterruptible(1); |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 1065 | } |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 1066 | torture_kthread_stopping("rcu_torture_boost"); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 1067 | return 0; |
| 1068 | } |
| 1069 | |
| 1070 | /* |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 1071 | * RCU torture force-quiescent-state kthread. Repeatedly induces |
| 1072 | * bursts of calls to force_quiescent_state(), increasing the probability |
| 1073 | * of occurrence of some important types of race conditions. |
| 1074 | */ |
| 1075 | static int |
| 1076 | rcu_torture_fqs(void *arg) |
| 1077 | { |
| 1078 | unsigned long fqs_resume_time; |
| 1079 | int fqs_burst_remaining; |
Paul E. McKenney | ab1b788 | 2020-09-22 16:42:42 -0700 | [diff] [blame] | 1080 | int oldnice = task_nice(current); |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 1081 | |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 1082 | VERBOSE_TOROUT_STRING("rcu_torture_fqs task started"); |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 1083 | do { |
| 1084 | fqs_resume_time = jiffies + fqs_stutter * HZ; |
Paul E. McKenney | 3c80b40 | 2020-04-10 15:37:12 -0700 | [diff] [blame] | 1085 | while (time_before(jiffies, fqs_resume_time) && |
Paul E. McKenney | 93898fb | 2011-08-17 12:39:34 -0700 | [diff] [blame] | 1086 | !kthread_should_stop()) { |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 1087 | schedule_timeout_interruptible(1); |
| 1088 | } |
| 1089 | fqs_burst_remaining = fqs_duration; |
Paul E. McKenney | 93898fb | 2011-08-17 12:39:34 -0700 | [diff] [blame] | 1090 | while (fqs_burst_remaining > 0 && |
| 1091 | !kthread_should_stop()) { |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 1092 | cur_ops->fqs(); |
| 1093 | udelay(fqs_holdoff); |
| 1094 | fqs_burst_remaining -= fqs_holdoff; |
| 1095 | } |
Paul E. McKenney | ab1b788 | 2020-09-22 16:42:42 -0700 | [diff] [blame] | 1096 | if (stutter_wait("rcu_torture_fqs")) |
| 1097 | sched_set_normal(current, oldnice); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 1098 | } while (!torture_must_stop()); |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 1099 | torture_kthread_stopping("rcu_torture_fqs"); |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 1100 | return 0; |
| 1101 | } |
| 1102 | |
Paul E. McKenney | 18fbf30 | 2020-11-16 16:46:06 -0800 | [diff] [blame] | 1103 | // Used by writers to randomly choose from the available grace-period |
| 1104 | // primitives. The only purpose of the initialization is to size the array. |
| 1105 | static int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC, RTWS_COND_GET, RTWS_POLL_GET, RTWS_SYNC }; |
| 1106 | static int nsynctypes; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1107 | |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1108 | /* |
Paul E. McKenney | 18fbf30 | 2020-11-16 16:46:06 -0800 | [diff] [blame] | 1109 | * Determine which grace-period primitives are available. |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 1110 | */ |
Paul E. McKenney | 18fbf30 | 2020-11-16 16:46:06 -0800 | [diff] [blame] | 1111 | static void rcu_torture_write_types(void) |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 1112 | { |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1113 | bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal; |
Paul E. McKenney | 0fd0548 | 2020-11-13 20:43:59 -0800 | [diff] [blame] | 1114 | bool gp_poll1 = gp_poll, gp_sync1 = gp_sync; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1115 | |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1116 | /* Initialize synctype[] array. If none set, take default. */ |
Paul E. McKenney | 0fd0548 | 2020-11-13 20:43:59 -0800 | [diff] [blame] | 1117 | if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_poll1 && !gp_sync1) |
| 1118 | gp_cond1 = gp_exp1 = gp_normal1 = gp_poll1 = gp_sync1 = true; |
Paul E. McKenney | fd56f64b | 2020-11-13 20:14:27 -0800 | [diff] [blame] | 1119 | if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) { |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1120 | synctype[nsynctypes++] = RTWS_COND_GET; |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1121 | pr_info("%s: Testing conditional GPs.\n", __func__); |
Paul E. McKenney | fd56f64b | 2020-11-13 20:14:27 -0800 | [diff] [blame] | 1122 | } else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) { |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 1123 | pr_alert("%s: gp_cond without primitives.\n", __func__); |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1124 | } |
| 1125 | if (gp_exp1 && cur_ops->exp_sync) { |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1126 | synctype[nsynctypes++] = RTWS_EXP_SYNC; |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1127 | pr_info("%s: Testing expedited GPs.\n", __func__); |
| 1128 | } else if (gp_exp && !cur_ops->exp_sync) { |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 1129 | pr_alert("%s: gp_exp without primitives.\n", __func__); |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1130 | } |
| 1131 | if (gp_normal1 && cur_ops->deferred_free) { |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1132 | synctype[nsynctypes++] = RTWS_DEF_FREE; |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1133 | pr_info("%s: Testing asynchronous GPs.\n", __func__); |
| 1134 | } else if (gp_normal && !cur_ops->deferred_free) { |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 1135 | pr_alert("%s: gp_normal without primitives.\n", __func__); |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1136 | } |
Paul E. McKenney | 0fd0548 | 2020-11-13 20:43:59 -0800 | [diff] [blame] | 1137 | if (gp_poll1 && cur_ops->start_gp_poll && cur_ops->poll_gp_state) { |
| 1138 | synctype[nsynctypes++] = RTWS_POLL_GET; |
| 1139 | pr_info("%s: Testing polling GPs.\n", __func__); |
| 1140 | } else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) { |
| 1141 | pr_alert("%s: gp_poll without primitives.\n", __func__); |
| 1142 | } |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1143 | if (gp_sync1 && cur_ops->sync) { |
Paul E. McKenney | f0bf8fa | 2014-03-21 16:17:56 -0700 | [diff] [blame] | 1144 | synctype[nsynctypes++] = RTWS_SYNC; |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1145 | pr_info("%s: Testing normal GPs.\n", __func__); |
| 1146 | } else if (gp_sync && !cur_ops->sync) { |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 1147 | pr_alert("%s: gp_sync without primitives.\n", __func__); |
Paul E. McKenney | db0c1a8 | 2017-12-08 12:23:10 -0800 | [diff] [blame] | 1148 | } |
Paul E. McKenney | 18fbf30 | 2020-11-16 16:46:06 -0800 | [diff] [blame] | 1149 | } |
| 1150 | |
| 1151 | /* |
| 1152 | * RCU torture writer kthread. Repeatedly substitutes a new structure |
| 1153 | * for that pointed to by rcu_torture_current, freeing the old structure |
| 1154 | * after a series of grace periods (the "pipeline"). |
| 1155 | */ |
| 1156 | static int |
| 1157 | rcu_torture_writer(void *arg) |
| 1158 | { |
| 1159 | bool boot_ended; |
| 1160 | bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal(); |
| 1161 | unsigned long cookie; |
| 1162 | int expediting = 0; |
| 1163 | unsigned long gp_snap; |
| 1164 | int i; |
| 1165 | int idx; |
| 1166 | int oldnice = task_nice(current); |
| 1167 | struct rcu_torture *rp; |
| 1168 | struct rcu_torture *old_rp; |
| 1169 | static DEFINE_TORTURE_RANDOM(rand); |
| 1170 | bool stutter_waited; |
| 1171 | |
| 1172 | VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); |
| 1173 | if (!can_expedite) |
| 1174 | pr_alert("%s" TORTURE_FLAG |
| 1175 | " GP expediting controlled from boot/sysfs for %s.\n", |
| 1176 | torture_type, cur_ops->name); |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1177 | if (WARN_ONCE(nsynctypes == 0, |
| 1178 | "rcu_torture_writer: No update-side primitives.\n")) { |
Paul E. McKenney | f0bf8fa | 2014-03-21 16:17:56 -0700 | [diff] [blame] | 1179 | /* |
| 1180 | 		 * No update-side primitives, so don't try updating.
| 1181 | * The resulting test won't be testing much, hence the |
| 1182 | * above WARN_ONCE(). |
| 1183 | */ |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1184 | rcu_torture_writer_state = RTWS_STOPPING; |
| 1185 | torture_kthread_stopping("rcu_torture_writer"); |
| 1186 | } |
| 1187 | |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1188 | do { |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1189 | rcu_torture_writer_state = RTWS_FIXED_DELAY; |
Paul E. McKenney | 1eba0ef | 2020-11-17 14:12:24 -0800 | [diff] [blame] | 1190 | torture_hrtimeout_us(500, 1000, &rand); |
Paul E. McKenney | a71fca5 | 2009-09-18 10:28:19 -0700 | [diff] [blame] | 1191 | rp = rcu_torture_alloc(); |
| 1192 | if (rp == NULL) |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1193 | continue; |
| 1194 | rp->rtort_pipe_count = 0; |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1195 | rcu_torture_writer_state = RTWS_DELAY; |
Paul E. McKenney | 51b1130 | 2014-01-27 11:49:39 -0800 | [diff] [blame] | 1196 | udelay(torture_random(&rand) & 0x3ff); |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1197 | rcu_torture_writer_state = RTWS_REPLACE; |
Paul E. McKenney | 0ddea0e | 2010-09-19 21:06:14 -0700 | [diff] [blame] | 1198 | old_rp = rcu_dereference_check(rcu_torture_current, |
| 1199 | current == writer_task); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1200 | rp->rtort_mbtest = 1; |
| 1201 | rcu_assign_pointer(rcu_torture_current, rp); |
Paul E. McKenney | 9b2619a | 2009-09-23 09:50:43 -0700 | [diff] [blame] | 1202 | smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ |
Josh Triplett | c8e5b16 | 2007-05-08 00:33:20 -0700 | [diff] [blame] | 1203 | if (old_rp) { |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1204 | i = old_rp->rtort_pipe_count; |
| 1205 | if (i > RCU_TORTURE_PIPE_LEN) |
| 1206 | i = RCU_TORTURE_PIPE_LEN; |
| 1207 | atomic_inc(&rcu_torture_wcount[i]); |
Paul E. McKenney | 2024891 | 2019-12-21 10:41:48 -0800 | [diff] [blame] | 1208 | WRITE_ONCE(old_rp->rtort_pipe_count, |
| 1209 | old_rp->rtort_pipe_count + 1); |
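| | 			// Cookie check 1: a polled grace-period cookie obtained while
| | 			// inside a reader must not already report a completed grace period.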
Paul E. McKenney | 0fd0548 | 2020-11-13 20:43:59 -0800 | [diff] [blame] | 1210 | if (cur_ops->get_gp_state && cur_ops->poll_gp_state) { |
| 1211 | idx = cur_ops->readlock(); |
| 1212 | cookie = cur_ops->get_gp_state(); |
| 1213 | WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE && |
| 1214 | cur_ops->poll_gp_state(cookie), |
| 1215 | "%s: Cookie check 1 failed %s(%d) %lu->%lu\n", |
| 1216 | __func__, |
| 1217 | rcu_torture_writer_state_getname(), |
| 1218 | rcu_torture_writer_state, |
| 1219 | cookie, cur_ops->get_gp_state()); |
| 1220 | cur_ops->readunlock(idx); |
| 1221 | } |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1222 | switch (synctype[torture_random(&rand) % nsynctypes]) { |
| 1223 | case RTWS_DEF_FREE: |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1224 | rcu_torture_writer_state = RTWS_DEF_FREE; |
Paul E. McKenney | 2ec1f2d | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 1225 | cur_ops->deferred_free(old_rp); |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1226 | break; |
| 1227 | case RTWS_EXP_SYNC: |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1228 | rcu_torture_writer_state = RTWS_EXP_SYNC; |
Paul E. McKenney | 2ec1f2d | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 1229 | cur_ops->exp_sync(); |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1230 | rcu_torture_pipe_update(old_rp); |
| 1231 | break; |
| 1232 | case RTWS_COND_GET: |
| 1233 | rcu_torture_writer_state = RTWS_COND_GET; |
Paul E. McKenney | fd56f64b | 2020-11-13 20:14:27 -0800 | [diff] [blame] | 1234 | gp_snap = cur_ops->get_gp_state(); |
Paul E. McKenney | ea31fd9 | 2020-11-17 11:32:54 -0800 | [diff] [blame] | 1235 | torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1236 | rcu_torture_writer_state = RTWS_COND_SYNC; |
| 1237 | cur_ops->cond_sync(gp_snap); |
| 1238 | rcu_torture_pipe_update(old_rp); |
| 1239 | break; |
Paul E. McKenney | 0fd0548 | 2020-11-13 20:43:59 -0800 | [diff] [blame] | 1240 | case RTWS_POLL_GET: |
| 1241 | rcu_torture_writer_state = RTWS_POLL_GET; |
| 1242 | gp_snap = cur_ops->start_gp_poll(); |
| 1243 | rcu_torture_writer_state = RTWS_POLL_WAIT; |
Paul E. McKenney | ea31fd9 | 2020-11-17 11:32:54 -0800 | [diff] [blame] | 1244 | while (!cur_ops->poll_gp_state(gp_snap)) |
| 1245 | torture_hrtimeout_jiffies(torture_random(&rand) % 16, |
| 1246 | &rand); |
Paul E. McKenney | 0fd0548 | 2020-11-13 20:43:59 -0800 | [diff] [blame] | 1247 | rcu_torture_pipe_update(old_rp); |
| 1248 | break; |
Paul E. McKenney | f0bf8fa | 2014-03-21 16:17:56 -0700 | [diff] [blame] | 1249 | case RTWS_SYNC: |
| 1250 | rcu_torture_writer_state = RTWS_SYNC; |
| 1251 | cur_ops->sync(); |
| 1252 | rcu_torture_pipe_update(old_rp); |
| 1253 | break; |
Paul E. McKenney | a48f3fa | 2014-03-18 15:57:41 -0700 | [diff] [blame] | 1254 | default: |
| 1255 | WARN_ON_ONCE(1); |
| 1256 | break; |
Paul E. McKenney | 2ec1f2d | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 1257 | } |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1258 | } |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 1259 | WRITE_ONCE(rcu_torture_current_version, |
| 1260 | rcu_torture_current_version + 1); |
Paul E. McKenney | 4bb3c5f | 2015-02-18 16:31:29 -0800 | [diff] [blame] | 1261 | /* Cycle through nesting levels of rcu_expedite_gp() calls. */ |
| 1262 | if (can_expedite && |
| 1263 | !(torture_random(&rand) & 0xff & (!!expediting - 1))) { |
| 1264 | WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited()); |
| 1265 | if (expediting >= 0) |
| 1266 | rcu_expedite_gp(); |
| 1267 | else |
| 1268 | rcu_unexpedite_gp(); |
| 1269 | if (++expediting > 3) |
| 1270 | expediting = -expediting; |
Paul E. McKenney | f7c0e6a | 2017-12-08 11:37:24 -0800 | [diff] [blame] | 1271 | } else if (!can_expedite) { /* Disabled during boot, recheck. */ |
| 1272 | can_expedite = !rcu_gp_is_expedited() && |
| 1273 | !rcu_gp_is_normal(); |
Paul E. McKenney | 4bb3c5f | 2015-02-18 16:31:29 -0800 | [diff] [blame] | 1274 | } |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1275 | rcu_torture_writer_state = RTWS_STUTTER; |
Paul E. McKenney | 12a910e | 2020-11-16 16:01:50 -0800 | [diff] [blame] | 1276 | boot_ended = rcu_inkernel_boot_has_ended(); |
Paul E. McKenney | ab1b788 | 2020-09-22 16:42:42 -0700 | [diff] [blame] | 1277 | stutter_waited = stutter_wait("rcu_torture_writer"); |
| 1278 | if (stutter_waited && |
Paul E. McKenney | 5eabea5 | 2019-04-12 09:02:46 -0700 | [diff] [blame] | 1279 | !READ_ONCE(rcu_fwd_cb_nodelay) && |
Paul E. McKenney | 3432d76 | 2019-04-15 14:50:05 -0700 | [diff] [blame] | 1280 | !cur_ops->slow_gps && |
Paul E. McKenney | 59ee032 | 2019-11-28 18:54:06 -0800 | [diff] [blame] | 1281 | !torture_must_stop() && |
Paul E. McKenney | 12a910e | 2020-11-16 16:01:50 -0800 | [diff] [blame] | 1282 | boot_ended) |
Paul E. McKenney | 474e59b | 2018-08-07 14:34:44 -0700 | [diff] [blame] | 1283 | for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) |
Paul E. McKenney | e8516c6 | 2019-04-09 11:06:32 -0700 | [diff] [blame] | 1284 | if (list_empty(&rcu_tortures[i].rtort_free) && |
| 1285 | rcu_access_pointer(rcu_torture_current) != |
Paul E. McKenney | 34aa34b | 2019-05-16 16:15:16 -0700 | [diff] [blame] | 1286 | &rcu_tortures[i]) { |
| 1287 | rcu_ftrace_dump(DUMP_ALL); |
Paul E. McKenney | e8516c6 | 2019-04-09 11:06:32 -0700 | [diff] [blame] | 1288 | WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count); |
Paul E. McKenney | 34aa34b | 2019-05-16 16:15:16 -0700 | [diff] [blame] | 1289 | } |
Paul E. McKenney | ab1b788 | 2020-09-22 16:42:42 -0700 | [diff] [blame] | 1290 | if (stutter_waited) |
| 1291 | sched_set_normal(current, oldnice); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 1292 | } while (!torture_must_stop()); |
Paul E. McKenney | cae7cc6 | 2020-04-26 19:20:37 -0700 | [diff] [blame] | 1293 | rcu_torture_current = NULL; // Let stats task know that we are done. |
Paul E. McKenney | 4bb3c5f | 2015-02-18 16:31:29 -0800 | [diff] [blame] | 1294 | /* Reset expediting back to unexpedited. */ |
| 1295 | if (expediting > 0) |
| 1296 | expediting = -expediting; |
| 1297 | while (can_expedite && expediting++ < 0) |
| 1298 | rcu_unexpedite_gp(); |
| 1299 | WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited()); |
Paul E. McKenney | f7c0e6a | 2017-12-08 11:37:24 -0800 | [diff] [blame] | 1300 | if (!can_expedite) |
| 1301 | pr_alert("%s" TORTURE_FLAG |
| 1302 | " Dynamic grace-period expediting was disabled.\n", |
| 1303 | torture_type); |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1304 | rcu_torture_writer_state = RTWS_STOPPING; |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 1305 | torture_kthread_stopping("rcu_torture_writer"); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1306 | return 0; |
| 1307 | } |
| 1308 | |
| 1309 | /* |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 1310 | * RCU torture fake writer kthread. Repeatedly calls sync, with a random |
| 1311 | * delay between calls. |
| 1312 | */ |
| 1313 | static int |
| 1314 | rcu_torture_fakewriter(void *arg) |
| 1315 | { |
Paul E. McKenney | 682189a | 2020-11-16 17:10:39 -0800 | [diff] [blame] | 1316 | unsigned long gp_snap; |
Paul E. McKenney | 51b1130 | 2014-01-27 11:49:39 -0800 | [diff] [blame] | 1317 | DEFINE_TORTURE_RANDOM(rand); |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 1318 | |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 1319 | VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started"); |
Linus Torvalds | 971eae7 | 2014-03-31 11:21:19 -0700 | [diff] [blame] | 1320 | set_user_nice(current, MAX_NICE); |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 1321 | |
| 1322 | do { |
Paul E. McKenney | 1eba0ef | 2020-11-17 14:12:24 -0800 | [diff] [blame] | 1323 | torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand); |
Paul E. McKenney | 72472a0 | 2012-05-29 17:50:51 -0700 | [diff] [blame] | 1324 | if (cur_ops->cb_barrier != NULL && |
Paul E. McKenney | 51b1130 | 2014-01-27 11:49:39 -0800 | [diff] [blame] | 1325 | torture_random(&rand) % (nfakewriters * 8) == 0) { |
Paul E. McKenney | 72472a0 | 2012-05-29 17:50:51 -0700 | [diff] [blame] | 1326 | cur_ops->cb_barrier(); |
Paul E. McKenney | 682189a | 2020-11-16 17:10:39 -0800 | [diff] [blame] | 1327 | } else { |
| 1328 | switch (synctype[torture_random(&rand) % nsynctypes]) { |
| 1329 | case RTWS_DEF_FREE: |
| 1330 | break; |
| 1331 | case RTWS_EXP_SYNC: |
Paul E. McKenney | 2ec1f2d | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 1332 | cur_ops->exp_sync(); |
Paul E. McKenney | 682189a | 2020-11-16 17:10:39 -0800 | [diff] [blame] | 1333 | break; |
| 1334 | case RTWS_COND_GET: |
| 1335 | gp_snap = cur_ops->get_gp_state(); |
Paul E. McKenney | ea31fd9 | 2020-11-17 11:32:54 -0800 | [diff] [blame] | 1336 | torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); |
Paul E. McKenney | 682189a | 2020-11-16 17:10:39 -0800 | [diff] [blame] | 1337 | cur_ops->cond_sync(gp_snap); |
| 1338 | break; |
| 1339 | case RTWS_POLL_GET: |
| 1340 | gp_snap = cur_ops->start_gp_poll(); |
| 1341 | while (!cur_ops->poll_gp_state(gp_snap)) { |
Paul E. McKenney | ea31fd9 | 2020-11-17 11:32:54 -0800 | [diff] [blame] | 1342 | torture_hrtimeout_jiffies(torture_random(&rand) % 16, |
| 1343 | &rand); |
Paul E. McKenney | 682189a | 2020-11-16 17:10:39 -0800 | [diff] [blame] | 1344 | } |
| 1345 | break; |
| 1346 | case RTWS_SYNC: |
| 1347 | cur_ops->sync(); |
| 1348 | break; |
| 1349 | default: |
| 1350 | WARN_ON_ONCE(1); |
| 1351 | break; |
| 1352 | } |
Paul E. McKenney | 2ec1f2d | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 1353 | } |
Paul E. McKenney | 628edaa | 2014-01-31 11:57:43 -0800 | [diff] [blame] | 1354 | stutter_wait("rcu_torture_fakewriter"); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 1355 | } while (!torture_must_stop()); |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 1356 | |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 1357 | torture_kthread_stopping("rcu_torture_fakewriter"); |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 1358 | return 0; |
| 1359 | } |
| 1360 | |
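| | /*
| |  * Grace-period callback that simply frees its argument; presumably posted
| |  * via call_rcu() from the timer-handler path to exercise callback invocation
| |  * from interrupt context.
| |  */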
Paul E. McKenney | f34c8585 | 2017-07-20 15:27:32 -0700 | [diff] [blame] | 1361 | static void rcu_torture_timer_cb(struct rcu_head *rhp) |
| 1362 | { |
| 1363 | kfree(rhp); |
| 1364 | } |
| 1365 | |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 1366 | // Set up and carry out testing of RCU's global memory ordering |
| 1367 | static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp, |
| 1368 | struct torture_random_state *trsp) |
| 1369 | { |
| 1370 | unsigned long loops; |
Paul E. McKenney | 1afb95f | 2020-12-19 07:34:35 -0800 | [diff] [blame] | 1371 | int noc = torture_num_online_cpus(); |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 1372 | int rdrchked; |
| 1373 | int rdrchker; |
| 1374 | struct rcu_torture_reader_check *rtrcp; // Me. |
| 1375 | struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking. |
| 1376 | struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked. |
| 1377 | struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me. |
| 1378 | |
| 1379 | if (myid < 0) |
| 1380 | return; // Don't try this from timer handlers. |
| 1381 | |
| 1382 | // Increment my counter. |
| 1383 | rtrcp = &rcu_torture_reader_mbchk[myid]; |
| 1384 | WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1); |
| 1385 | |
| 1386 | // Attempt to assign someone else some checking work. |
| 1387 | rdrchked = torture_random(trsp) % nrealreaders; |
| 1388 | rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; |
| 1389 | rdrchker = torture_random(trsp) % nrealreaders; |
| 1390 | rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker]; |
| 1391 | if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker && |
| 1392 | smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below. |
| 1393 | !READ_ONCE(rtp->rtort_chkp) && |
| 1394 | !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below. |
| 1395 | rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops); |
| 1396 | WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0); |
| 1397 | rtrcp->rtc_chkrdr = rdrchked; |
| 1398 | WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends. |
| 1399 | if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) || |
| 1400 | cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp)) |
| 1401 | (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out. |
| 1402 | } |
| 1403 | |
| 1404 | // If assigned some completed work, do it! |
| 1405 | rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner); |
| 1406 | if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready)) |
| 1407 | return; // No work or work not yet ready. |
| 1408 | rdrchked = rtrcp_assigner->rtc_chkrdr; |
| 1409 | if (WARN_ON_ONCE(rdrchked < 0)) |
| 1410 | return; |
| 1411 | rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; |
| 1412 | loops = READ_ONCE(rtrcp_chked->rtc_myloops); |
| 1413 | atomic_inc(&n_rcu_torture_mbchk_tries); |
| 1414 | if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops)) |
| 1415 | atomic_inc(&n_rcu_torture_mbchk_fail); |
| 1416 | rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2; |
| 1417 | rtrcp_assigner->rtc_ready = 0; |
| 1418 | smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work. |
| 1419 | smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign. |
| 1420 | } |
| 1421 | |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 1422 | /* |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1423 | * Do one extension of an RCU read-side critical section using the |
| 1424 | * current reader state in readstate (set to zero for initial entry |
| 1425 | * to extended critical section), set the new state as specified by |
| 1426 | * newstate (set to zero for final exit from extended critical section), |
| 1427 | * and random-number-generator state in trsp. If this is neither the |
| 1428 |  * beginning nor the end of the critical section and if there was actually a
| 1429 | * change, do a ->read_delay(). |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1430 | */ |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1431 | static void rcutorture_one_extend(int *readstate, int newstate, |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1432 | struct torture_random_state *trsp, |
| 1433 | struct rt_read_seg *rtrsp) |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1434 | { |
Paul E. McKenney | 52b1fc3 | 2020-03-28 18:53:25 -0700 | [diff] [blame] | 1435 | unsigned long flags; |
Paul E. McKenney | 1c3d539 | 2021-09-22 20:49:12 -0700 | [diff] [blame] | 1436 | int idxnew1 = -1; |
| 1437 | int idxnew2 = -1; |
| 1438 | int idxold1 = *readstate; |
| 1439 | int idxold2 = idxold1; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1440 | int statesnew = ~*readstate & newstate; |
| 1441 | int statesold = *readstate & ~newstate; |
| 1442 | |
Paul E. McKenney | 1c3d539 | 2021-09-22 20:49:12 -0700 | [diff] [blame] | 1443 | WARN_ON_ONCE(idxold2 < 0); |
| 1444 | WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1445 | rtrsp->rt_readstate = newstate; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1446 | |
| 1447 | /* First, put new protection in place to avoid critical-section gap. */ |
| 1448 | if (statesnew & RCUTORTURE_RDR_BH) |
| 1449 | local_bh_disable(); |
Scott Wood | 71921a9 | 2021-08-20 09:42:36 +0200 | [diff] [blame] | 1450 | if (statesnew & RCUTORTURE_RDR_RBH) |
| 1451 | rcu_read_lock_bh(); |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1452 | if (statesnew & RCUTORTURE_RDR_IRQ) |
| 1453 | local_irq_disable(); |
| 1454 | if (statesnew & RCUTORTURE_RDR_PREEMPT) |
| 1455 | preempt_disable(); |
Paul E. McKenney | 2ceebc0 | 2018-07-06 15:16:12 -0700 | [diff] [blame] | 1456 | if (statesnew & RCUTORTURE_RDR_SCHED) |
| 1457 | rcu_read_lock_sched(); |
Paul E. McKenney | 1c3d539 | 2021-09-22 20:49:12 -0700 | [diff] [blame] | 1458 | if (statesnew & RCUTORTURE_RDR_RCU_1) |
| 1459 | idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1; |
| 1460 | if (statesnew & RCUTORTURE_RDR_RCU_2) |
| 1461 | idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1462 | |
Scott Wood | 71921a9 | 2021-08-20 09:42:36 +0200 | [diff] [blame] | 1463 | /* |
| 1464 | * Next, remove old protection, in decreasing order of strength |
| 1465 | * to avoid unlock paths that aren't safe in the stronger |
| 1466 | 	 * context. Namely: BH cannot be enabled with interrupts disabled.
| 1467 | 	 * Additionally, PREEMPT_RT requires that BH be enabled in preemptible
| 1468 | * context. |
| 1469 | */ |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1470 | if (statesold & RCUTORTURE_RDR_IRQ) |
| 1471 | local_irq_enable(); |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1472 | if (statesold & RCUTORTURE_RDR_PREEMPT) |
| 1473 | preempt_enable(); |
Paul E. McKenney | 2ceebc0 | 2018-07-06 15:16:12 -0700 | [diff] [blame] | 1474 | if (statesold & RCUTORTURE_RDR_SCHED) |
| 1475 | rcu_read_unlock_sched(); |
Scott Wood | 71921a9 | 2021-08-20 09:42:36 +0200 | [diff] [blame] | 1476 | if (statesold & RCUTORTURE_RDR_BH) |
| 1477 | local_bh_enable(); |
| 1478 | if (statesold & RCUTORTURE_RDR_RBH) |
| 1479 | rcu_read_unlock_bh(); |
Paul E. McKenney | 1c3d539 | 2021-09-22 20:49:12 -0700 | [diff] [blame] | 1480 | if (statesold & RCUTORTURE_RDR_RCU_2) { |
| 1481 | cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1); |
| 1482 | WARN_ON_ONCE(idxnew2 != -1); |
| 1483 | idxold2 = 0; |
| 1484 | } |
| 1485 | if (statesold & RCUTORTURE_RDR_RCU_1) { |
Paul E. McKenney | 340170f | 2021-09-24 21:30:26 -0700 | [diff] [blame] | 1486 | bool lockit; |
Paul E. McKenney | 52b1fc3 | 2020-03-28 18:53:25 -0700 | [diff] [blame] | 1487 | |
Paul E. McKenney | 340170f | 2021-09-24 21:30:26 -0700 | [diff] [blame] | 1488 | lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff); |
Paul E. McKenney | 52b1fc3 | 2020-03-28 18:53:25 -0700 | [diff] [blame] | 1489 | if (lockit) |
| 1490 | raw_spin_lock_irqsave(¤t->pi_lock, flags); |
Paul E. McKenney | 1c3d539 | 2021-09-22 20:49:12 -0700 | [diff] [blame] | 1491 | cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1); |
| 1492 | WARN_ON_ONCE(idxnew1 != -1); |
| 1493 | idxold1 = 0; |
Paul E. McKenney | 52b1fc3 | 2020-03-28 18:53:25 -0700 | [diff] [blame] | 1494 | if (lockit) |
| 1495 | raw_spin_unlock_irqrestore(¤t->pi_lock, flags); |
| 1496 | } |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1497 | |
| 1498 | /* Delay if neither beginning nor end and there was a change. */ |
| 1499 | if ((statesnew || statesold) && *readstate && newstate) |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1500 | cur_ops->read_delay(trsp, rtrsp); |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1501 | |
| 1502 | /* Update the reader state. */ |
Paul E. McKenney | 1c3d539 | 2021-09-22 20:49:12 -0700 | [diff] [blame] | 1503 | if (idxnew1 == -1) |
| 1504 | idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1; |
| 1505 | WARN_ON_ONCE(idxnew1 < 0); |
| 1506 | if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1)) |
| 1507 | pr_info("Unexpected idxnew1 value of %#x\n", idxnew1); |
| 1508 | if (idxnew2 == -1) |
| 1509 | idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2; |
| 1510 | WARN_ON_ONCE(idxnew2 < 0); |
| 1511 | WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1); |
| 1512 | *readstate = idxnew1 | idxnew2 | newstate; |
| 1513 | WARN_ON_ONCE(*readstate < 0); |
| 1514 | if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1)) |
| 1515 | pr_info("Unexpected idxnew2 value of %#x\n", idxnew2); |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1516 | } |
| 1517 | |
| 1518 | /* Return the biggest extendables mask given current RCU and boot parameters. */ |
| 1519 | static int rcutorture_extend_mask_max(void) |
| 1520 | { |
| 1521 | int mask; |
| 1522 | |
| 1523 | WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND); |
| 1524 | mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables; |
Paul E. McKenney | 1c3d539 | 2021-09-22 20:49:12 -0700 | [diff] [blame] | 1525 | mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1526 | return mask; |
| 1527 | } |
| 1528 | |
| 1529 | /* Return a random protection state mask, but with at least one bit set. */ |
| 1530 | static int |
| 1531 | rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) |
| 1532 | { |
| 1533 | int mask = rcutorture_extend_mask_max(); |
Paul E. McKenney | bf1bef5 | 2018-06-10 08:50:09 -0700 | [diff] [blame] | 1534 | unsigned long randmask1 = torture_random(trsp) >> 8; |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1535 | unsigned long randmask2 = randmask1 >> 3; |
Scott Wood | 71921a9 | 2021-08-20 09:42:36 +0200 | [diff] [blame] | 1536 | unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; |
| 1537 | unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ; |
| 1538 | unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1539 | |
Paul E. McKenney | 1c3d539 | 2021-09-22 20:49:12 -0700 | [diff] [blame] | 1540 | WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1); |
Paul E. McKenney | a3b0e1e5 | 2019-02-28 15:06:13 -0800 | [diff] [blame] | 1541 | /* Mostly only one bit (need preemption!), sometimes lots of bits. */ |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1542 | if (!(randmask1 & 0x7)) |
Paul E. McKenney | bf1bef5 | 2018-06-10 08:50:09 -0700 | [diff] [blame] | 1543 | mask = mask & randmask2; |
| 1544 | else |
| 1545 | mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); |
Scott Wood | 71921a9 | 2021-08-20 09:42:36 +0200 | [diff] [blame] | 1546 | |
Paul E. McKenney | 1c3d539 | 2021-09-22 20:49:12 -0700 | [diff] [blame] | 1547 | // Can't have nested RCU reader without outer RCU reader. |
| 1548 | if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) { |
| 1549 | if (oldmask & RCUTORTURE_RDR_RCU_1) |
| 1550 | mask &= ~RCUTORTURE_RDR_RCU_2; |
| 1551 | else |
| 1552 | mask |= RCUTORTURE_RDR_RCU_1; |
| 1553 | } |
| 1554 | |
Scott Wood | 71921a9 | 2021-08-20 09:42:36 +0200 | [diff] [blame] | 1555 | /* |
| 1556 | 	 * Can't enable BH with IRQs disabled.
| 1557 | */ |
| 1558 | if (mask & RCUTORTURE_RDR_IRQ) |
| 1559 | mask |= oldmask & bhs; |
| 1560 | |
| 1561 | /* |
| 1562 | * Ideally these sequences would be detected in debug builds |
| 1563 | * (regardless of RT), but until then don't stop testing |
| 1564 | * them on non-RT. |
| 1565 | */ |
| 1566 | if (IS_ENABLED(CONFIG_PREEMPT_RT)) { |
| 1567 | /* Can't modify BH in atomic context */ |
| 1568 | if (oldmask & preempts_irq) |
| 1569 | mask &= ~bhs; |
| 1570 | if ((oldmask | mask) & preempts_irq) |
| 1571 | mask |= oldmask & bhs; |
| 1572 | } |
| 1573 | |
Paul E. McKenney | 1c3d539 | 2021-09-22 20:49:12 -0700 | [diff] [blame] | 1574 | return mask ?: RCUTORTURE_RDR_RCU_1; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1575 | } |
| 1576 | |
| 1577 | /* |
| 1578 | * Do a randomly selected number of extensions of an existing RCU read-side |
| 1579 | * critical section. |
| 1580 | */ |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1581 | static struct rt_read_seg * |
| 1582 | rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, |
| 1583 | struct rt_read_seg *rtrsp) |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1584 | { |
| 1585 | int i; |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1586 | int j; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1587 | int mask = rcutorture_extend_mask_max(); |
| 1588 | |
| 1589 | WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ |
| 1590 | if (!((mask - 1) & mask)) |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1591 | return rtrsp; /* Current RCU reader not extendable. */ |
| 1592 | /* Bias towards larger numbers of loops. */ |
| 1593 | i = (torture_random(trsp) >> 3); |
| 1594 | i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1; |
| 1595 | for (j = 0; j < i; j++) { |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1596 | mask = rcutorture_extend_mask(*readstate, trsp); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1597 | rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]); |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1598 | } |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1599 | return &rtrsp[j]; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1600 | } |
| 1601 | |
| 1602 | /* |
Paul E. McKenney | 6b06aa7 | 2018-05-22 10:56:05 -0700 | [diff] [blame] | 1603 | * Do one read-side critical section, returning false if there was |
| 1604 | * no data to read. Can be invoked both from process context and |
| 1605 | * from a timer handler. |
| 1606 | */ |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 1607 | static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) |
Paul E. McKenney | 6b06aa7 | 2018-05-22 10:56:05 -0700 | [diff] [blame] | 1608 | { |
Paul E. McKenney | bc480a6 | 2020-11-15 12:45:57 -0800 | [diff] [blame] | 1609 | unsigned long cookie; |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1610 | int i; |
Paul E. McKenney | 917963d | 2014-11-21 17:10:16 -0800 | [diff] [blame] | 1611 | unsigned long started; |
Paul E. McKenney | 6b80da4 | 2014-11-21 14:19:26 -0800 | [diff] [blame] | 1612 | unsigned long completed; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1613 | int newstate; |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1614 | struct rcu_torture *p; |
| 1615 | int pipe_count; |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1616 | int readstate = 0; |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1617 | struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } }; |
| 1618 | struct rt_read_seg *rtrsp = &rtseg[0]; |
| 1619 | struct rt_read_seg *rtrsp1; |
Paul E. McKenney | 5249453 | 2012-11-14 16:26:40 -0800 | [diff] [blame] | 1620 | unsigned long long ts; |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1621 | |
Paul E. McKenney | 7752275 | 2020-06-11 16:43:14 -0700 | [diff] [blame] | 1622 | WARN_ON_ONCE(!rcu_is_watching()); |
Paul E. McKenney | 2397d07 | 2018-05-25 07:29:25 -0700 | [diff] [blame] | 1623 | newstate = rcutorture_extend_mask(readstate, trsp); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1624 | rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++); |
Paul E. McKenney | bc480a6 | 2020-11-15 12:45:57 -0800 | [diff] [blame] | 1625 | if (cur_ops->get_gp_state && cur_ops->poll_gp_state) |
| 1626 | cookie = cur_ops->get_gp_state(); |
Paul E. McKenney | 17ef2fe | 2018-04-27 11:39:34 -0700 | [diff] [blame] | 1627 | started = cur_ops->get_gp_seq(); |
Steven Rostedt | e4aa0da | 2013-02-04 13:36:13 -0500 | [diff] [blame] | 1628 | ts = rcu_trace_clock_local(); |
Paul E. McKenney | 632ee20 | 2010-02-22 17:04:45 -0800 | [diff] [blame] | 1629 | p = rcu_dereference_check(rcu_torture_current, |
Paul E. McKenney | a5c095e | 2021-03-13 20:05:31 -0800 | [diff] [blame] | 1630 | !cur_ops->readlock_held || cur_ops->readlock_held()); |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1631 | if (p == NULL) { |
Paul E. McKenney | 6b06aa7 | 2018-05-22 10:56:05 -0700 | [diff] [blame] | 1632 | /* Wait for rcu_torture_writer to get underway */ |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1633 | rcutorture_one_extend(&readstate, 0, trsp, rtrsp); |
Paul E. McKenney | 6b06aa7 | 2018-05-22 10:56:05 -0700 | [diff] [blame] | 1634 | return false; |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1635 | } |
| 1636 | if (p->rtort_mbtest == 0) |
| 1637 | atomic_inc(&n_rcu_torture_mberror); |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 1638 | rcu_torture_reader_do_mbchk(myid, p, trsp); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1639 | rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp); |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1640 | preempt_disable(); |
Paul E. McKenney | 2024891 | 2019-12-21 10:41:48 -0800 | [diff] [blame] | 1641 | pipe_count = READ_ONCE(p->rtort_pipe_count); |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1642 | if (pipe_count > RCU_TORTURE_PIPE_LEN) { |
| 1643 | /* Should not happen, but... */ |
| 1644 | pipe_count = RCU_TORTURE_PIPE_LEN; |
| 1645 | } |
Paul E. McKenney | 17ef2fe | 2018-04-27 11:39:34 -0700 | [diff] [blame] | 1646 | completed = cur_ops->get_gp_seq(); |
Paul E. McKenney | 5249453 | 2012-11-14 16:26:40 -0800 | [diff] [blame] | 1647 | if (pipe_count > 1) { |
Paul E. McKenney | 6b06aa7 | 2018-05-22 10:56:05 -0700 | [diff] [blame] | 1648 | do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, |
| 1649 | ts, started, completed); |
Paul E. McKenney | 274529b | 2016-03-21 19:46:04 -0700 | [diff] [blame] | 1650 | rcu_ftrace_dump(DUMP_ALL); |
Paul E. McKenney | 5249453 | 2012-11-14 16:26:40 -0800 | [diff] [blame] | 1651 | } |
Rusty Russell | dd17c8f | 2009-10-29 22:34:15 +0900 | [diff] [blame] | 1652 | __this_cpu_inc(rcu_torture_count[pipe_count]); |
Paul E. McKenney | d7219312 | 2018-05-15 15:24:41 -0700 | [diff] [blame] | 1653 | completed = rcutorture_seq_diff(completed, started); |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1654 | if (completed > RCU_TORTURE_PIPE_LEN) { |
| 1655 | /* Should not happen, but... */ |
| 1656 | completed = RCU_TORTURE_PIPE_LEN; |
| 1657 | } |
Rusty Russell | dd17c8f | 2009-10-29 22:34:15 +0900 | [diff] [blame] | 1658 | __this_cpu_inc(rcu_torture_batch[completed]); |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1659 | preempt_enable(); |
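	/*
	 * Still within the read-side critical section, check that the
	 * polled grace-period state snapshotted near the start of this
	 * reader has not already advanced to completion.  If it has,
	 * the grace period was too short.
	 */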
Paul E. McKenney | bc480a6 | 2020-11-15 12:45:57 -0800 | [diff] [blame] | 1660 | if (cur_ops->get_gp_state && cur_ops->poll_gp_state) |
| 1661 | WARN_ONCE(cur_ops->poll_gp_state(cookie), |
Paul E. McKenney | 7ac3fdf | 2021-02-25 20:56:10 -0800 | [diff] [blame] | 1662 | "%s: Cookie check 2 failed %s(%d) %lu->%lu\n", |
Paul E. McKenney | bc480a6 | 2020-11-15 12:45:57 -0800 | [diff] [blame] | 1663 | __func__, |
| 1664 | rcu_torture_writer_state_getname(), |
| 1665 | rcu_torture_writer_state, |
| 1666 | cookie, cur_ops->get_gp_state()); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1667 | rcutorture_one_extend(&readstate, 0, trsp, rtrsp); |
Paul E. McKenney | 902d82e6 | 2021-09-22 20:31:44 -0700 | [diff] [blame] | 1668 | WARN_ON_ONCE(readstate); |
Paul E. McKenney | d685514 | 2020-08-11 10:33:39 -0700 | [diff] [blame] | 1669 | 	// This next splat is expected behavior if the leakpointer module
| 1670 | 	// parameter is set, especially for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
| 1671 | WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1); |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 1672 | |
| 1673 | /* If error or close call, record the sequence of reader protections. */ |
| 1674 | if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { |
| 1675 | i = 0; |
| 1676 | for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++) |
| 1677 | err_segs[i++] = *rtrsp1; |
| 1678 | rt_read_nsegs = i; |
| 1679 | } |
| 1680 | |
Paul E. McKenney | 6b06aa7 | 2018-05-22 10:56:05 -0700 | [diff] [blame] | 1681 | return true; |
| 1682 | } |
| 1683 | |
Paul E. McKenney | 3025520e | 2018-05-22 11:38:47 -0700 | [diff] [blame] | 1684 | static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); |
| 1685 | |
Paul E. McKenney | 6b06aa7 | 2018-05-22 10:56:05 -0700 | [diff] [blame] | 1686 | /* |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1687 | * RCU torture reader from timer handler. Dereferences rcu_torture_current, |
| 1688 | * incrementing the corresponding element of the pipeline array. The |
| 1689 |  * counter in the element should never be greater than 1; otherwise, the
| 1690 | * RCU implementation is broken. |
| 1691 | */ |
| 1692 | static void rcu_torture_timer(struct timer_list *unused) |
| 1693 | { |
Paul E. McKenney | 8da9a59 | 2018-05-22 11:17:51 -0700 | [diff] [blame] | 1694 | atomic_long_inc(&n_rcu_torture_timers); |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 1695 | (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1); |
Paul E. McKenney | f34c8585 | 2017-07-20 15:27:32 -0700 | [diff] [blame] | 1696 | |
| 1697 | /* Test call_rcu() invocation from interrupt handler. */ |
| 1698 | if (cur_ops->call) { |
| 1699 | struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); |
| 1700 | |
| 1701 | if (rhp) |
| 1702 | cur_ops->call(rhp, rcu_torture_timer_cb); |
| 1703 | } |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1704 | } |
| 1705 | |
| 1706 | /* |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1707 | * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, |
| 1708 | * incrementing the corresponding element of the pipeline array. The |
| 1709 |  * counter in the element should never be greater than 1; otherwise, the
| 1710 | * RCU implementation is broken. |
| 1711 | */ |
| 1712 | static int |
| 1713 | rcu_torture_reader(void *arg) |
| 1714 | { |
Paul E. McKenney | 444da51 | 2018-07-04 14:14:42 -0700 | [diff] [blame] | 1715 | unsigned long lastsleep = jiffies; |
Paul E. McKenney | c04dd09 | 2018-07-23 14:16:47 -0700 | [diff] [blame] | 1716 | long myid = (long)arg; |
| 1717 | int mynumonline = myid; |
Paul E. McKenney | 51b1130 | 2014-01-27 11:49:39 -0800 | [diff] [blame] | 1718 | DEFINE_TORTURE_RANDOM(rand); |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1719 | struct timer_list t; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1720 | |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 1721 | VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); |
Linus Torvalds | 971eae7 | 2014-03-31 11:21:19 -0700 | [diff] [blame] | 1722 | set_user_nice(current, MAX_NICE); |
Paul E. McKenney | 0acc512 | 2009-06-25 09:08:17 -0700 | [diff] [blame] | 1723 | if (irqreader && cur_ops->irq_capable) |
Kees Cook | fd30b71 | 2017-10-22 17:58:54 -0700 | [diff] [blame] | 1724 | timer_setup_on_stack(&t, rcu_torture_timer, 0); |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 1725 | tick_dep_set_task(current, TICK_DEP_BIT_RCU); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1726 | do { |
Paul E. McKenney | 0acc512 | 2009-06-25 09:08:17 -0700 | [diff] [blame] | 1727 | if (irqreader && cur_ops->irq_capable) { |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1728 | if (!timer_pending(&t)) |
Paul E. McKenney | 6155fec | 2010-02-22 17:05:04 -0800 | [diff] [blame] | 1729 | mod_timer(&t, jiffies + 1); |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1730 | } |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 1731 | if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop()) |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1732 | schedule_timeout_interruptible(HZ); |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 1733 | if (time_after(jiffies, lastsleep) && !torture_must_stop()) { |
Paul E. McKenney | 1eba0ef | 2020-11-17 14:12:24 -0800 | [diff] [blame] | 1734 | torture_hrtimeout_us(500, 1000, &rand); |
Paul E. McKenney | 444da51 | 2018-07-04 14:14:42 -0700 | [diff] [blame] | 1735 | lastsleep = jiffies + 10; |
| 1736 | } |
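		/*
		 * Reader kthreads whose index exceeds the current number of
		 * online CPUs park here, so the number of active readers
		 * roughly tracks the number of online CPUs.
		 */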
Paul E. McKenney | 1afb95f | 2020-12-19 07:34:35 -0800 | [diff] [blame] | 1737 | while (torture_num_online_cpus() < mynumonline && !torture_must_stop()) |
Paul E. McKenney | c04dd09 | 2018-07-23 14:16:47 -0700 | [diff] [blame] | 1738 | schedule_timeout_interruptible(HZ / 5); |
Paul E. McKenney | 628edaa | 2014-01-31 11:57:43 -0800 | [diff] [blame] | 1739 | stutter_wait("rcu_torture_reader"); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 1740 | } while (!torture_must_stop()); |
Thomas Gleixner | 424c1b6 | 2014-03-23 08:58:27 -0700 | [diff] [blame] | 1741 | if (irqreader && cur_ops->irq_capable) { |
Paul E. McKenney | 0729fbf | 2008-06-25 12:24:52 -0700 | [diff] [blame] | 1742 | del_timer_sync(&t); |
Thomas Gleixner | 424c1b6 | 2014-03-23 08:58:27 -0700 | [diff] [blame] | 1743 | destroy_timer_on_stack(&t); |
| 1744 | } |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 1745 | tick_dep_clear_task(current, TICK_DEP_BIT_RCU); |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 1746 | torture_kthread_stopping("rcu_torture_reader"); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1747 | return 0; |
| 1748 | } |
| 1749 | |
| 1750 | /* |
Paul E. McKenney | 2c4319b | 2020-09-23 17:39:46 -0700 | [diff] [blame] | 1751 |  * Randomly toggle CPUs' callback-offload state. This uses hrtimers to
| 1752 |  * increase race probabilities and fuzzes the interval between toggle operations.
| 1753 | */ |
| 1754 | static int rcu_nocb_toggle(void *arg) |
| 1755 | { |
| 1756 | int cpu; |
| 1757 | int maxcpu = -1; |
| 1758 | int oldnice = task_nice(current); |
| 1759 | long r; |
| 1760 | DEFINE_TORTURE_RANDOM(rand); |
| 1761 | ktime_t toggle_delay; |
| 1762 | unsigned long toggle_fuzz; |
| 1763 | ktime_t toggle_interval = ms_to_ktime(nocbs_toggle); |
| 1764 | |
| 1765 | VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started"); |
| 1766 | while (!rcu_inkernel_boot_has_ended()) |
| 1767 | schedule_timeout_interruptible(HZ / 10); |
| 1768 | for_each_online_cpu(cpu) |
| 1769 | maxcpu = cpu; |
| 1770 | WARN_ON(maxcpu < 0); |
| 1771 | if (toggle_interval > ULONG_MAX) |
| 1772 | toggle_fuzz = ULONG_MAX >> 3; |
| 1773 | else |
| 1774 | toggle_fuzz = toggle_interval >> 3; |
| 1775 | if (toggle_fuzz <= 0) |
| 1776 | toggle_fuzz = NSEC_PER_USEC; |
| 1777 | do { |
| 1778 | r = torture_random(&rand); |
| 1779 | cpu = (r >> 4) % (maxcpu + 1); |
| 1780 | if (r & 0x1) { |
| 1781 | rcu_nocb_cpu_offload(cpu); |
| 1782 | atomic_long_inc(&n_nocb_offload); |
| 1783 | } else { |
| 1784 | rcu_nocb_cpu_deoffload(cpu); |
| 1785 | atomic_long_inc(&n_nocb_deoffload); |
| 1786 | } |
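		/*
		 * Sleep for the base toggle interval plus up to toggle_fuzz
		 * nanoseconds of random fuzz before the next toggle.
		 */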
| 1787 | toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval; |
| 1788 | set_current_state(TASK_INTERRUPTIBLE); |
| 1789 | schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL); |
| 1790 | if (stutter_wait("rcu_nocb_toggle")) |
| 1791 | sched_set_normal(current, oldnice); |
| 1792 | } while (!torture_must_stop()); |
| 1793 | torture_kthread_stopping("rcu_nocb_toggle"); |
| 1794 | return 0; |
| 1795 | } |
| 1796 | |
| 1797 | /* |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1798 |  * Print torture statistics. The caller must ensure that there is only
| 1799 |  * one call to this function at any given time. This is normally
| 1800 |  * accomplished by relying on the module system to load only one copy
| 1801 |  * of the module, and then by giving the rcu_torture_stats
| 1802 |  * kthread full control (or the init/cleanup functions when the
| 1803 |  * rcu_torture_stats kthread is not running).
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1804 | */ |
Chen Gang | d100895 | 2013-11-07 10:30:25 +0800 | [diff] [blame] | 1805 | static void |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1806 | rcu_torture_stats_print(void) |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1807 | { |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1808 | int cpu; |
| 1809 | int i; |
| 1810 | long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; |
| 1811 | long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; |
Paul E. McKenney | 5396d31 | 2020-01-08 19:58:13 -0800 | [diff] [blame] | 1812 | struct rcu_torture *rtcp; |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1813 | static unsigned long rtcv_snap = ULONG_MAX; |
Paul E. McKenney | 0032f4e | 2017-08-30 10:40:17 -0700 | [diff] [blame] | 1814 | static bool splatted; |
Paul E. McKenney | 4ffa669 | 2016-06-30 11:56:38 -0700 | [diff] [blame] | 1815 | struct task_struct *wtp; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1816 | |
KAMEZAWA Hiroyuki | 0a94502 | 2006-03-28 01:56:37 -0800 | [diff] [blame] | 1817 | for_each_possible_cpu(cpu) { |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1818 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { |
Paul E. McKenney | f042a43 | 2020-01-03 16:27:00 -0800 | [diff] [blame] | 1819 | pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]); |
| 1820 | batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1821 | } |
| 1822 | } |
| 1823 | for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) { |
| 1824 | if (pipesummary[i] != 0) |
| 1825 | break; |
| 1826 | } |
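	/*
	 * At this point, i indexes the highest-numbered non-empty pipeline
	 * bucket; i > 1 below therefore indicates that some reader saw a
	 * too-short grace period.
	 */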
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1827 | |
| 1828 | pr_alert("%s%s ", torture_type, TORTURE_FLAG); |
Paul E. McKenney | 5396d31 | 2020-01-08 19:58:13 -0800 | [diff] [blame] | 1829 | rtcp = rcu_access_pointer(rcu_torture_current); |
Paul E. McKenney | 354ea05 | 2019-05-25 12:36:53 -0700 | [diff] [blame] | 1830 | pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", |
Paul E. McKenney | 5396d31 | 2020-01-08 19:58:13 -0800 | [diff] [blame] | 1831 | rtcp, |
| 1832 | rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER", |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1833 | rcu_torture_current_version, |
| 1834 | list_empty(&rcu_torture_freelist), |
| 1835 | atomic_read(&n_rcu_torture_alloc), |
| 1836 | atomic_read(&n_rcu_torture_alloc_fail), |
| 1837 | atomic_read(&n_rcu_torture_free)); |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 1838 | pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld rtbre: %ld ", |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1839 | atomic_read(&n_rcu_torture_mberror), |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 1840 | atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries), |
SeongJae Park | 472213a | 2016-08-13 15:54:35 +0900 | [diff] [blame] | 1841 | n_rcu_torture_barrier_error, |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1842 | n_rcu_torture_boost_ktrerror, |
| 1843 | n_rcu_torture_boost_rterror); |
| 1844 | pr_cont("rtbf: %ld rtb: %ld nt: %ld ", |
| 1845 | n_rcu_torture_boost_failure, |
| 1846 | n_rcu_torture_boosts, |
Paul E. McKenney | 8da9a59 | 2018-05-22 11:17:51 -0700 | [diff] [blame] | 1847 | atomic_long_read(&n_rcu_torture_timers)); |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1848 | torture_onoff_stats(); |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 1849 | pr_cont("barrier: %ld/%ld:%ld ", |
Paul E. McKenney | c9527be | 2020-02-18 13:41:02 -0800 | [diff] [blame] | 1850 | data_race(n_barrier_successes), |
| 1851 | data_race(n_barrier_attempts), |
| 1852 | data_race(n_rcu_torture_barrier_error)); |
Paul E. McKenney | f759081 | 2020-12-21 11:17:16 -0800 | [diff] [blame] | 1853 | pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic. |
Paul E. McKenney | 2c4319b | 2020-09-23 17:39:46 -0700 | [diff] [blame] | 1854 | pr_cont("nocb-toggles: %ld:%ld\n", |
| 1855 | atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload)); |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1856 | |
| 1857 | pr_alert("%s%s ", torture_type, TORTURE_FLAG); |
Paul E. McKenney | 8b5ddf8 | 2019-08-14 12:02:40 -0700 | [diff] [blame] | 1858 | if (atomic_read(&n_rcu_torture_mberror) || |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 1859 | atomic_read(&n_rcu_torture_mbchk_fail) || |
Paul E. McKenney | 8b5ddf8 | 2019-08-14 12:02:40 -0700 | [diff] [blame] | 1860 | n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror || |
| 1861 | n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure || |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 1862 | i > 1) { |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1863 | pr_cont("%s", "!!! "); |
Paul E. McKenney | 996417d | 2005-11-18 01:10:50 -0800 | [diff] [blame] | 1864 | atomic_inc(&n_rcu_torture_error); |
Paul E. McKenney | 8b5ddf8 | 2019-08-14 12:02:40 -0700 | [diff] [blame] | 1865 | WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 1866 | WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail)); |
Paul E. McKenney | 8b5ddf8 | 2019-08-14 12:02:40 -0700 | [diff] [blame] | 1867 | WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() |
| 1868 | WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread |
| 1869 | WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio |
Paul E. McKenney | 5e59fba | 2021-01-15 13:30:38 -0800 | [diff] [blame] | 1870 | WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?) |
Paul E. McKenney | 8b5ddf8 | 2019-08-14 12:02:40 -0700 | [diff] [blame] | 1871 | WARN_ON_ONCE(i > 1); // Too-short grace period |
Paul E. McKenney | 996417d | 2005-11-18 01:10:50 -0800 | [diff] [blame] | 1872 | } |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1873 | pr_cont("Reader Pipe: "); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1874 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1875 | pr_cont(" %ld", pipesummary[i]); |
| 1876 | pr_cont("\n"); |
| 1877 | |
| 1878 | pr_alert("%s%s ", torture_type, TORTURE_FLAG); |
| 1879 | pr_cont("Reader Batch: "); |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 1880 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1881 | pr_cont(" %ld", batchsummary[i]); |
| 1882 | pr_cont("\n"); |
| 1883 | |
| 1884 | pr_alert("%s%s ", torture_type, TORTURE_FLAG); |
| 1885 | pr_cont("Free-Block Circulation: "); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1886 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1887 | pr_cont(" %d", atomic_read(&rcu_torture_wcount[i])); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1888 | } |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1889 | pr_cont("\n"); |
| 1890 | |
Josh Triplett | c8e5b16 | 2007-05-08 00:33:20 -0700 | [diff] [blame] | 1891 | if (cur_ops->stats) |
Joe Perches | eea203f | 2014-07-14 09:16:15 -0400 | [diff] [blame] | 1892 | cur_ops->stats(); |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1893 | if (rtcv_snap == rcu_torture_current_version && |
Paul E. McKenney | 5396d31 | 2020-01-08 19:58:13 -0800 | [diff] [blame] | 1894 | rcu_access_pointer(rcu_torture_current) && |
| 1895 | !rcu_stall_is_suppressed()) { |
Paul E. McKenney | 7f6733c | 2017-04-18 17:17:35 -0700 | [diff] [blame] | 1896 | int __maybe_unused flags = 0; |
Paul E. McKenney | aebc826 | 2018-05-01 06:42:51 -0700 | [diff] [blame] | 1897 | unsigned long __maybe_unused gp_seq = 0; |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1898 | |
| 1899 | rcutorture_get_gp_data(cur_ops->ttype, |
Paul E. McKenney | aebc826 | 2018-05-01 06:42:51 -0700 | [diff] [blame] | 1900 | &flags, &gp_seq); |
Paul E. McKenney | 7f6733c | 2017-04-18 17:17:35 -0700 | [diff] [blame] | 1901 | srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, |
Paul E. McKenney | aebc826 | 2018-05-01 06:42:51 -0700 | [diff] [blame] | 1902 | &flags, &gp_seq); |
Paul E. McKenney | 4ffa669 | 2016-06-30 11:56:38 -0700 | [diff] [blame] | 1903 | wtp = READ_ONCE(writer_task); |
Peter Zijlstra | 2f064a5 | 2021-06-11 10:28:17 +0200 | [diff] [blame] | 1904 | pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n", |
Paul E. McKenney | 18aff33 | 2015-11-17 13:35:28 -0800 | [diff] [blame] | 1905 | rcu_torture_writer_state_getname(), |
Paul E. McKenney | aebc826 | 2018-05-01 06:42:51 -0700 | [diff] [blame] | 1906 | rcu_torture_writer_state, gp_seq, flags, |
Peter Zijlstra | 2f064a5 | 2021-06-11 10:28:17 +0200 | [diff] [blame] | 1907 | wtp == NULL ? ~0U : wtp->__state, |
Paul E. McKenney | 808de39 | 2017-06-19 10:03:22 -0700 | [diff] [blame] | 1908 | wtp == NULL ? -1 : (int)task_cpu(wtp)); |
Paul E. McKenney | 0032f4e | 2017-08-30 10:40:17 -0700 | [diff] [blame] | 1909 | if (!splatted && wtp) { |
| 1910 | sched_show_task(wtp); |
| 1911 | splatted = true; |
| 1912 | } |
Paul E. McKenney | 27c0f14 | 2020-09-15 17:08:03 -0700 | [diff] [blame] | 1913 | if (cur_ops->gp_kthread_dbg) |
| 1914 | cur_ops->gp_kthread_dbg(); |
Paul E. McKenney | 274529b | 2016-03-21 19:46:04 -0700 | [diff] [blame] | 1915 | rcu_ftrace_dump(DUMP_ALL); |
Paul E. McKenney | ad0dc7f | 2014-02-19 10:51:42 -0800 | [diff] [blame] | 1916 | } |
| 1917 | rtcv_snap = rcu_torture_current_version; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1918 | } |
| 1919 | |
| 1920 | /* |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1921 | * Periodically prints torture statistics, if periodic statistics printing |
| 1922 | * was specified via the stat_interval module parameter. |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1923 | */ |
| 1924 | static int |
| 1925 | rcu_torture_stats(void *arg) |
| 1926 | { |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 1927 | VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1928 | do { |
| 1929 | schedule_timeout_interruptible(stat_interval * HZ); |
| 1930 | rcu_torture_stats_print(); |
Paul E. McKenney | f67a335 | 2014-01-29 07:40:27 -0800 | [diff] [blame] | 1931 | torture_shutdown_absorb("rcu_torture_stats"); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 1932 | } while (!torture_must_stop()); |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 1933 | torture_kthread_stopping("rcu_torture_stats"); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 1934 | return 0; |
| 1935 | } |
| 1936 | |
Paul E. McKenney | 7ab2bd3 | 2021-05-02 19:56:05 -0700 | [diff] [blame] | 1937 | /* Test mem_dump_obj() and friends. */ |
| 1938 | static void rcu_torture_mem_dump_obj(void) |
| 1939 | { |
| 1940 | struct rcu_head *rhp; |
| 1941 | struct kmem_cache *kcp; |
| 1942 | static int z; |
| 1943 | |
| 1944 | kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL); |
| 1945 | rhp = kmem_cache_alloc(kcp, GFP_KERNEL); |
| 1946 | pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z); |
| 1947 | pr_alert("mem_dump_obj(ZERO_SIZE_PTR):"); |
| 1948 | mem_dump_obj(ZERO_SIZE_PTR); |
| 1949 | pr_alert("mem_dump_obj(NULL):"); |
| 1950 | mem_dump_obj(NULL); |
| 1951 | pr_alert("mem_dump_obj(%px):", &rhp); |
| 1952 | mem_dump_obj(&rhp); |
| 1953 | pr_alert("mem_dump_obj(%px):", rhp); |
| 1954 | mem_dump_obj(rhp); |
| 1955 | pr_alert("mem_dump_obj(%px):", &rhp->func); |
| 1956 | mem_dump_obj(&rhp->func); |
| 1957 | pr_alert("mem_dump_obj(%px):", &z); |
| 1958 | mem_dump_obj(&z); |
| 1959 | kmem_cache_free(kcp, rhp); |
| 1960 | kmem_cache_destroy(kcp); |
| 1961 | rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); |
| 1962 | pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); |
| 1963 | pr_alert("mem_dump_obj(kmalloc %px):", rhp); |
| 1964 | mem_dump_obj(rhp); |
| 1965 | pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func); |
| 1966 | mem_dump_obj(&rhp->func); |
| 1967 | kfree(rhp); |
| 1968 | rhp = vmalloc(4096); |
| 1969 | pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); |
| 1970 | pr_alert("mem_dump_obj(vmalloc %px):", rhp); |
| 1971 | mem_dump_obj(rhp); |
| 1972 | pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func); |
| 1973 | mem_dump_obj(&rhp->func); |
| 1974 | vfree(rhp); |
| 1975 | } |
| 1976 | |
Paul E. McKenney | eac45e5 | 2018-05-17 11:33:17 -0700 | [diff] [blame] | 1977 | static void |
Steven Rostedt (Red Hat) | e66c33d | 2013-07-12 16:50:28 -0400 | [diff] [blame] | 1978 | rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) |
Paul E. McKenney | 95c3832 | 2006-03-24 03:15:58 -0800 | [diff] [blame] | 1979 | { |
Paul E. McKenney | 2caa1e4 | 2012-08-09 16:30:45 -0700 | [diff] [blame] | 1980 | pr_alert("%s" TORTURE_FLAG |
| 1981 | "--- %s: nreaders=%d nfakewriters=%d " |
| 1982 | "stat_interval=%d verbose=%d test_no_idle_hz=%d " |
| 1983 | "shuffle_interval=%d stutter=%d irqreader=%d " |
| 1984 | "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " |
| 1985 | "test_boost=%d/%d test_boost_interval=%d " |
| 1986 | "test_boost_duration=%d shutdown_secs=%d " |
Paul E. McKenney | 2b1516e | 2017-08-18 16:11:37 -0700 | [diff] [blame] | 1987 | "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 1988 | "stall_cpu_block=%d " |
Paul E. McKenney | 67afeed | 2012-10-20 12:56:06 -0700 | [diff] [blame] | 1989 | "n_barrier_cbs=%d " |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 1990 | "onoff_interval=%d onoff_holdoff=%d " |
Paul E. McKenney | 2c4319b | 2020-09-23 17:39:46 -0700 | [diff] [blame] | 1991 | "read_exit_delay=%d read_exit_burst=%d " |
| 1992 | "nocbs_nthreads=%d nocbs_toggle=%d\n", |
Paul E. McKenney | 2caa1e4 | 2012-08-09 16:30:45 -0700 | [diff] [blame] | 1993 | torture_type, tag, nrealreaders, nfakewriters, |
| 1994 | stat_interval, verbose, test_no_idle_hz, shuffle_interval, |
| 1995 | stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, |
| 1996 | test_boost, cur_ops->can_boost, |
| 1997 | test_boost_interval, test_boost_duration, shutdown_secs, |
Paul E. McKenney | 2b1516e | 2017-08-18 16:11:37 -0700 | [diff] [blame] | 1998 | stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 1999 | stall_cpu_block, |
Paul E. McKenney | 67afeed | 2012-10-20 12:56:06 -0700 | [diff] [blame] | 2000 | n_barrier_cbs, |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 2001 | onoff_interval, onoff_holdoff, |
Paul E. McKenney | 2c4319b | 2020-09-23 17:39:46 -0700 | [diff] [blame] | 2002 | read_exit_delay, read_exit_burst, |
| 2003 | nocbs_nthreads, nocbs_toggle); |
Paul E. McKenney | 95c3832 | 2006-03-24 03:15:58 -0800 | [diff] [blame] | 2004 | } |
| 2005 | |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 2006 | static int rcutorture_booster_cleanup(unsigned int cpu) |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2007 | { |
| 2008 | struct task_struct *t; |
| 2009 | |
| 2010 | if (boost_tasks[cpu] == NULL) |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 2011 | return 0; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2012 | mutex_lock(&boost_mutex); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2013 | t = boost_tasks[cpu]; |
| 2014 | boost_tasks[cpu] = NULL; |
Joel Fernandes (Google) | 450efca | 2018-06-10 16:45:43 -0700 | [diff] [blame] | 2015 | rcu_torture_enable_rt_throttle(); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2016 | mutex_unlock(&boost_mutex); |
| 2017 | |
| 2018 | 	/* This must be outside of the mutex; otherwise, deadlock! */
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 2019 | torture_stop_kthread(rcu_torture_boost, t); |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 2020 | return 0; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2021 | } |
| 2022 | |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 2023 | static int rcutorture_booster_init(unsigned int cpu) |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2024 | { |
| 2025 | int retval; |
| 2026 | |
| 2027 | if (boost_tasks[cpu] != NULL) |
| 2028 | return 0; /* Already created, nothing more to do. */ |
| 2029 | |
| 2030 | /* Don't allow time recalculation while creating a new task. */ |
| 2031 | mutex_lock(&boost_mutex); |
Joel Fernandes (Google) | 450efca | 2018-06-10 16:45:43 -0700 | [diff] [blame] | 2032 | rcu_torture_disable_rt_throttle(); |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 2033 | VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); |
Eric Dumazet | 1f28809 | 2011-06-16 15:53:18 -0700 | [diff] [blame] | 2034 | boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL, |
| 2035 | cpu_to_node(cpu), |
| 2036 | "rcu_torture_boost"); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2037 | if (IS_ERR(boost_tasks[cpu])) { |
| 2038 | retval = PTR_ERR(boost_tasks[cpu]); |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 2039 | VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2040 | n_rcu_torture_boost_ktrerror++; |
| 2041 | boost_tasks[cpu] = NULL; |
| 2042 | mutex_unlock(&boost_mutex); |
| 2043 | return retval; |
| 2044 | } |
| 2045 | kthread_bind(boost_tasks[cpu], cpu); |
| 2046 | wake_up_process(boost_tasks[cpu]); |
| 2047 | mutex_unlock(&boost_mutex); |
| 2048 | return 0; |
| 2049 | } |
| 2050 | |
Paul E. McKenney | d5f546d | 2011-11-04 11:44:12 -0700 | [diff] [blame] | 2051 | /* |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2052 | * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then |
| 2053 | * induces a CPU stall for the time specified by stall_cpu. |
| 2054 | */ |
Paul Gortmaker | 49fb4c6 | 2013-06-19 14:52:21 -0400 | [diff] [blame] | 2055 | static int rcu_torture_stall(void *args) |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2056 | { |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 2057 | int idx; |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2058 | unsigned long stop_at; |
| 2059 | |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 2060 | VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2061 | if (stall_cpu_holdoff > 0) { |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 2062 | VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2063 | schedule_timeout_interruptible(stall_cpu_holdoff * HZ); |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 2064 | VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2065 | } |
Paul E. McKenney | 55b2dcf | 2020-04-01 19:57:52 -0700 | [diff] [blame] | 2066 | if (!kthread_should_stop() && stall_gp_kthread > 0) { |
| 2067 | VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall"); |
| 2068 | rcu_gp_set_torture_wait(stall_gp_kthread * HZ); |
| 2069 | for (idx = 0; idx < stall_gp_kthread + 2; idx++) { |
| 2070 | if (kthread_should_stop()) |
| 2071 | break; |
| 2072 | schedule_timeout_uninterruptible(HZ); |
| 2073 | } |
| 2074 | } |
| 2075 | if (!kthread_should_stop() && stall_cpu > 0) { |
| 2076 | VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall"); |
Arnd Bergmann | 622be33f | 2018-06-18 16:47:34 +0200 | [diff] [blame] | 2077 | stop_at = ktime_get_seconds() + stall_cpu; |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2078 | 		/* RCU CPU stall is expected behavior in the following code. */
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 2079 | idx = cur_ops->readlock(); |
Paul E. McKenney | 2b1516e | 2017-08-18 16:11:37 -0700 | [diff] [blame] | 2080 | if (stall_cpu_irqsoff) |
| 2081 | local_irq_disable(); |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 2082 | else if (!stall_cpu_block) |
Paul E. McKenney | 2b1516e | 2017-08-18 16:11:37 -0700 | [diff] [blame] | 2083 | preempt_disable(); |
Stephen Zhang | 0a27fff | 2021-01-23 17:54:17 +0800 | [diff] [blame] | 2084 | pr_alert("%s start on CPU %d.\n", |
| 2085 | __func__, raw_smp_processor_id()); |
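		/*
		 * Stall within the read-side critical section until stop_at.
		 * By default this spins with preemption (and, if
		 * stall_cpu_irqsoff, interrupts) disabled.  If stall_cpu_block,
		 * it instead blocks (or preempt_schedule()s under
		 * CONFIG_PREEMPTION); if stall_no_softlockup, it touches the
		 * softlockup watchdog while spinning.
		 */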
Arnd Bergmann | 622be33f | 2018-06-18 16:47:34 +0200 | [diff] [blame] | 2086 | while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), |
| 2087 | stop_at)) |
Paul E. McKenney | 59e8366 | 2021-05-16 21:17:27 -0700 | [diff] [blame] | 2088 | if (stall_cpu_block) { |
| 2089 | #ifdef CONFIG_PREEMPTION |
| 2090 | preempt_schedule(); |
| 2091 | #else |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 2092 | schedule_timeout_uninterruptible(HZ); |
Paul E. McKenney | 59e8366 | 2021-05-16 21:17:27 -0700 | [diff] [blame] | 2093 | #endif |
Wander Lairson Costa | 5ff7c9f | 2021-11-10 11:37:45 -0300 | [diff] [blame] | 2094 | } else if (stall_no_softlockup) { |
| 2095 | touch_softlockup_watchdog(); |
Paul E. McKenney | 59e8366 | 2021-05-16 21:17:27 -0700 | [diff] [blame] | 2096 | } |
Paul E. McKenney | 2b1516e | 2017-08-18 16:11:37 -0700 | [diff] [blame] | 2097 | if (stall_cpu_irqsoff) |
| 2098 | local_irq_enable(); |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 2099 | else if (!stall_cpu_block) |
Paul E. McKenney | 2b1516e | 2017-08-18 16:11:37 -0700 | [diff] [blame] | 2100 | preempt_enable(); |
Paul E. McKenney | 19a8ff9 | 2020-03-11 17:39:12 -0700 | [diff] [blame] | 2101 | cur_ops->readunlock(idx); |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2102 | } |
Stephen Zhang | 0a27fff | 2021-01-23 17:54:17 +0800 | [diff] [blame] | 2103 | pr_alert("%s end.\n", __func__); |
Paul E. McKenney | f67a335 | 2014-01-29 07:40:27 -0800 | [diff] [blame] | 2104 | torture_shutdown_absorb("rcu_torture_stall"); |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2105 | while (!kthread_should_stop()) |
| 2106 | schedule_timeout_interruptible(10 * HZ); |
| 2107 | return 0; |
| 2108 | } |
| 2109 | |
| 2110 | /* Spawn CPU-stall kthread, if stall_cpu specified. */ |
| 2111 | static int __init rcu_torture_stall_init(void) |
| 2112 | { |
Paul E. McKenney | 55b2dcf | 2020-04-01 19:57:52 -0700 | [diff] [blame] | 2113 | if (stall_cpu <= 0 && stall_gp_kthread <= 0) |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2114 | return 0; |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 2115 | return torture_create_kthread(rcu_torture_stall, NULL, stall_task); |
Paul E. McKenney | c13f375 | 2012-01-20 15:36:33 -0800 | [diff] [blame] | 2116 | } |
| 2117 | |
Paul E. McKenney | 9fdcb9a | 2018-07-19 13:36:00 -0700 | [diff] [blame] | 2118 | /* State structure for forward-progress self-propagating RCU callback. */ |
| 2119 | struct fwd_cb_state { |
| 2120 | struct rcu_head rh; |
| 2121 | int stop; |
| 2122 | }; |
| 2123 | |
| 2124 | /* |
| 2125 | * Forward-progress self-propagating RCU callback function. Because |
| 2126 | * callbacks run from softirq, this function is an implicit RCU read-side |
| 2127 |  * callbacks run from softirq, this function runs within an implicit RCU
| 2128 |  * read-side critical section.
| 2129 | static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) |
| 2130 | { |
| 2131 | struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); |
| 2132 | |
| 2133 | if (READ_ONCE(fcsp->stop)) { |
| 2134 | WRITE_ONCE(fcsp->stop, 2); |
| 2135 | return; |
| 2136 | } |
| 2137 | cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); |
| 2138 | } |
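/*
 * The ->stop field forms a simple handshake with the function above:
 * 0 tells the callback to keep reposting itself, 1 asks it to stop, and
 * the callback writes 2 to acknowledge that it has stopped.  Usage
 * sketch, as in rcu_torture_fwd_prog_nr() below:
 *
 *	WRITE_ONCE(fcs.stop, 0);
 *	cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
 *	...
 *	WRITE_ONCE(fcs.stop, 1);
 *	cur_ops->sync();	// Wait for a running callback to see ->stop.
 *	cur_ops->cb_barrier();	// Wait for the final queued callback.
 *	WARN_ON(READ_ONCE(fcs.stop) != 2);
 */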
| 2139 | |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2140 | /* State for continuous-flood RCU callbacks. */ |
| 2141 | struct rcu_fwd_cb { |
| 2142 | struct rcu_head rh; |
| 2143 | struct rcu_fwd_cb *rfc_next; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2144 | struct rcu_fwd *rfc_rfp; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2145 | int rfc_gps; |
| 2146 | }; |
Paul E. McKenney | a289e60 | 2019-11-05 08:31:56 -0800 | [diff] [blame] | 2147 | |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2148 | #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ |
| 2149 | #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ |
| 2150 | #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ |
Paul E. McKenney | 2e57bf9 | 2018-10-05 16:43:09 -0700 | [diff] [blame] | 2151 | #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */ |
Paul E. McKenney | a289e60 | 2019-11-05 08:31:56 -0800 | [diff] [blame] | 2152 | #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) |
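/*
 * The histogram uses one bucket per 1/FWD_CBS_HIST_DIV second and is
 * sized for twice MAX_FWD_CB_JIFFIES, that is,
 * 2 * 8 seconds * 10 buckets/second = 160 buckets.
 */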
| 2153 | |
Paul E. McKenney | cd618d1 | 2019-01-08 13:41:26 -0800 | [diff] [blame] | 2154 | struct rcu_launder_hist { |
| 2155 | long n_launders; |
| 2156 | unsigned long launder_gp_seq; |
| 2157 | }; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2158 | |
Paul E. McKenney | a289e60 | 2019-11-05 08:31:56 -0800 | [diff] [blame] | 2159 | struct rcu_fwd { |
| 2160 | spinlock_t rcu_fwd_lock; |
| 2161 | struct rcu_fwd_cb *rcu_fwd_cb_head; |
| 2162 | struct rcu_fwd_cb **rcu_fwd_cb_tail; |
| 2163 | long n_launders_cb; |
| 2164 | unsigned long rcu_fwd_startat; |
| 2165 | struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; |
| 2166 | unsigned long rcu_launder_gp_seq_start; |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2167 | int rcu_fwd_id; |
Paul E. McKenney | a289e60 | 2019-11-05 08:31:56 -0800 | [diff] [blame] | 2168 | }; |
| 2169 | |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2170 | static DEFINE_MUTEX(rcu_fwd_mutex); |
Jason Yan | afbc157 | 2020-04-09 19:42:38 +0800 | [diff] [blame] | 2171 | static struct rcu_fwd *rcu_fwds; |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2172 | static unsigned long rcu_fwd_seq; |
Paul E. McKenney | 53b541f | 2021-11-23 13:51:11 -0800 | [diff] [blame] | 2173 | static atomic_long_t rcu_fwd_max_cbs; |
Jason Yan | afbc157 | 2020-04-09 19:42:38 +0800 | [diff] [blame] | 2174 | static bool rcu_fwd_emergency_stop; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2175 | |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2176 | static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) |
Paul E. McKenney | 1a68275 | 2018-10-03 12:33:41 -0700 | [diff] [blame] | 2177 | { |
Paul E. McKenney | cd618d1 | 2019-01-08 13:41:26 -0800 | [diff] [blame] | 2178 | unsigned long gps; |
| 2179 | unsigned long gps_old; |
Paul E. McKenney | 1a68275 | 2018-10-03 12:33:41 -0700 | [diff] [blame] | 2180 | int i; |
| 2181 | int j; |
| 2182 | |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2183 | for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) |
| 2184 | if (rfp->n_launders_hist[i].n_launders > 0) |
Paul E. McKenney | 1a68275 | 2018-10-03 12:33:41 -0700 | [diff] [blame] | 2185 | break; |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2186 | mutex_lock(&rcu_fwd_mutex); // Serialize histograms. |
| 2187 | pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):", |
| 2188 | __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat); |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2189 | gps_old = rfp->rcu_launder_gp_seq_start; |
Paul E. McKenney | cd618d1 | 2019-01-08 13:41:26 -0800 | [diff] [blame] | 2190 | for (j = 0; j <= i; j++) { |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2191 | gps = rfp->n_launders_hist[j].launder_gp_seq; |
Paul E. McKenney | cd618d1 | 2019-01-08 13:41:26 -0800 | [diff] [blame] | 2192 | pr_cont(" %ds/%d: %ld:%ld", |
Paul E. McKenney | a289e60 | 2019-11-05 08:31:56 -0800 | [diff] [blame] | 2193 | j + 1, FWD_CBS_HIST_DIV, |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2194 | rfp->n_launders_hist[j].n_launders, |
Paul E. McKenney | cd618d1 | 2019-01-08 13:41:26 -0800 | [diff] [blame] | 2195 | rcutorture_seq_diff(gps, gps_old)); |
| 2196 | gps_old = gps; |
| 2197 | } |
Paul E. McKenney | 1a68275 | 2018-10-03 12:33:41 -0700 | [diff] [blame] | 2198 | pr_cont("\n"); |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2199 | mutex_unlock(&rcu_fwd_mutex); |
Paul E. McKenney | 1a68275 | 2018-10-03 12:33:41 -0700 | [diff] [blame] | 2200 | } |
| 2201 | |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2202 | /* Callback function for continuous-flood RCU callbacks. */ |
| 2203 | static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) |
| 2204 | { |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2205 | unsigned long flags; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2206 | int i; |
| 2207 | struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); |
| 2208 | struct rcu_fwd_cb **rfcpp; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2209 | struct rcu_fwd *rfp = rfcp->rfc_rfp; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2210 | |
| 2211 | rfcp->rfc_next = NULL; |
| 2212 | rfcp->rfc_gps++; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2213 | spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); |
| 2214 | rfcpp = rfp->rcu_fwd_cb_tail; |
| 2215 | rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2216 | WRITE_ONCE(*rfcpp, rfcp); |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2217 | WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); |
| 2218 | i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); |
| 2219 | if (i >= ARRAY_SIZE(rfp->n_launders_hist)) |
| 2220 | i = ARRAY_SIZE(rfp->n_launders_hist) - 1; |
| 2221 | rfp->n_launders_hist[i].n_launders++; |
| 2222 | rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); |
| 2223 | spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2224 | } |
| 2225 | |
Paul E. McKenney | ab21f60 | 2019-04-14 18:30:22 -0700 | [diff] [blame] | 2226 | // Give the scheduler a chance, even on nohz_full CPUs. |
Paul E. McKenney | bd1bfc5 | 2019-06-22 14:35:59 -0700 | [diff] [blame] | 2227 | static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) |
Paul E. McKenney | ab21f60 | 2019-04-14 18:30:22 -0700 | [diff] [blame] | 2228 | { |
Sebastian Andrzej Siewior | 90326f0 | 2019-10-15 21:18:14 +0200 | [diff] [blame] | 2229 | if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { |
Paul E. McKenney | bd1bfc5 | 2019-06-22 14:35:59 -0700 | [diff] [blame] | 2230 | // Real call_rcu() floods hit userspace, so emulate that. |
| 2231 | if (need_resched() || (iter & 0xfff)) |
Paul E. McKenney | ab21f60 | 2019-04-14 18:30:22 -0700 | [diff] [blame] | 2232 | schedule(); |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 2233 | return; |
Paul E. McKenney | ab21f60 | 2019-04-14 18:30:22 -0700 | [diff] [blame] | 2234 | } |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 2235 | // No userspace emulation: CB invocation throttles call_rcu() |
| 2236 | cond_resched(); |
Paul E. McKenney | ab21f60 | 2019-04-14 18:30:22 -0700 | [diff] [blame] | 2237 | } |
| 2238 | |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2239 | /* |
| 2240 | * Free all callbacks on the rcu_fwd_cb_head list, either because the |
| 2241 | * test is over or because we hit an OOM event. |
| 2242 | */ |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2243 | static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2244 | { |
| 2245 | unsigned long flags; |
| 2246 | unsigned long freed = 0; |
| 2247 | struct rcu_fwd_cb *rfcp; |
| 2248 | |
| 2249 | for (;;) { |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2250 | spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); |
| 2251 | rfcp = rfp->rcu_fwd_cb_head; |
Paul E. McKenney | 140e53f | 2019-04-09 10:08:18 -0700 | [diff] [blame] | 2252 | if (!rfcp) { |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2253 | spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2254 | break; |
Paul E. McKenney | 140e53f | 2019-04-09 10:08:18 -0700 | [diff] [blame] | 2255 | } |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2256 | rfp->rcu_fwd_cb_head = rfcp->rfc_next; |
| 2257 | if (!rfp->rcu_fwd_cb_head) |
| 2258 | rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; |
| 2259 | spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2260 | kfree(rfcp); |
| 2261 | freed++; |
Paul E. McKenney | bd1bfc5 | 2019-06-22 14:35:59 -0700 | [diff] [blame] | 2262 | rcu_torture_fwd_prog_cond_resched(freed); |
Paul E. McKenney | 79ba7ff | 2019-08-04 13:17:35 -0700 | [diff] [blame] | 2263 | if (tick_nohz_full_enabled()) { |
| 2264 | local_irq_save(flags); |
| 2265 | rcu_momentary_dyntick_idle(); |
| 2266 | local_irq_restore(flags); |
| 2267 | } |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2268 | } |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2269 | return freed; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2270 | } |
| 2271 | |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2272 | /* Carry out need_resched()/cond_resched() forward-progress testing. */ |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2273 | static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, |
| 2274 | int *tested, int *tested_tries) |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2275 | { |
Paul E. McKenney | 119248b | 2018-07-18 15:39:37 -0700 | [diff] [blame] | 2276 | unsigned long cver; |
Paul E. McKenney | f4de46e | 2018-07-24 20:50:40 -0700 | [diff] [blame] | 2277 | unsigned long dur; |
Paul E. McKenney | 7c590fc | 2018-08-07 16:42:42 -0700 | [diff] [blame] | 2278 | struct fwd_cb_state fcs; |
Paul E. McKenney | 119248b | 2018-07-18 15:39:37 -0700 | [diff] [blame] | 2279 | unsigned long gps; |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2280 | int idx; |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2281 | int sd; |
| 2282 | int sd4; |
| 2283 | bool selfpropcb = false; |
| 2284 | unsigned long stopat; |
| 2285 | static DEFINE_TORTURE_RANDOM(trs); |
| 2286 | |
Paul E. McKenney | a7eb937 | 2020-10-09 19:51:55 -0700 | [diff] [blame] | 2287 | if (!cur_ops->sync) |
| 2288 | return; // Cannot do need_resched() forward progress testing without ->sync. |
| 2289 | if (cur_ops->call && cur_ops->cb_barrier) { |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2290 | init_rcu_head_on_stack(&fcs.rh); |
| 2291 | selfpropcb = true; |
| 2292 | } |
| 2293 | |
| 2294 | /* Tight loop containing cond_resched(). */ |
Paul E. McKenney | e8516c6 | 2019-04-09 11:06:32 -0700 | [diff] [blame] | 2295 | WRITE_ONCE(rcu_fwd_cb_nodelay, true); |
| 2296 | cur_ops->sync(); /* Later readers see above write. */ |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2297 | if (selfpropcb) { |
| 2298 | WRITE_ONCE(fcs.stop, 0); |
| 2299 | cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); |
| 2300 | } |
| 2301 | cver = READ_ONCE(rcu_torture_current_version); |
| 2302 | gps = cur_ops->get_gp_seq(); |
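	/*
	 * Pick a random duration between 1/fwd_progress_div of the RCU CPU
	 * stall timeout (rounded up) and just under the full timeout, so
	 * that the loop below stresses forward progress without (normally)
	 * triggering a stall warning.
	 */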
| 2303 | sd = cur_ops->stall_dur() + 1; |
| 2304 | sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; |
| 2305 | dur = sd4 + torture_random(&trs) % (sd - sd4); |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2306 | WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); |
| 2307 | stopat = rfp->rcu_fwd_startat + dur; |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2308 | while (time_before(jiffies, stopat) && |
Paul E. McKenney | 60013d5 | 2019-07-10 08:30:00 -0700 | [diff] [blame] | 2309 | !shutdown_time_arrived() && |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2310 | !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2311 | idx = cur_ops->readlock(); |
| 2312 | udelay(10); |
| 2313 | cur_ops->readunlock(idx); |
| 2314 | if (!fwd_progress_need_resched || need_resched()) |
Paul E. McKenney | fbbd5e3 | 2019-08-15 11:43:53 -0700 | [diff] [blame] | 2315 | cond_resched(); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2316 | } |
| 2317 | (*tested_tries)++; |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2318 | if (!time_before(jiffies, stopat) && |
Paul E. McKenney | 60013d5 | 2019-07-10 08:30:00 -0700 | [diff] [blame] | 2319 | !shutdown_time_arrived() && |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2320 | !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2321 | (*tested)++; |
| 2322 | cver = READ_ONCE(rcu_torture_current_version) - cver; |
| 2323 | gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); |
| 2324 | WARN_ON(!cver && gps < 2); |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2325 | pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__, |
| 2326 | rfp->rcu_fwd_id, dur, cver, gps); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2327 | } |
| 2328 | if (selfpropcb) { |
| 2329 | WRITE_ONCE(fcs.stop, 1); |
| 2330 | cur_ops->sync(); /* Wait for running CB to complete. */ |
| 2331 | cur_ops->cb_barrier(); /* Wait for queued callbacks. */ |
| 2332 | } |
| 2333 | |
| 2334 | if (selfpropcb) { |
| 2335 | WARN_ON(READ_ONCE(fcs.stop) != 2); |
| 2336 | destroy_rcu_head_on_stack(&fcs.rh); |
| 2337 | } |
Paul E. McKenney | e8516c6 | 2019-04-09 11:06:32 -0700 | [diff] [blame] | 2338 | schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ |
| 2339 | WRITE_ONCE(rcu_fwd_cb_nodelay, false); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2340 | } |
| 2341 | |
| 2342 | /* Carry out call_rcu() forward-progress testing. */ |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2343 | static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2344 | { |
| 2345 | unsigned long cver; |
Paul E. McKenney | 79ba7ff | 2019-08-04 13:17:35 -0700 | [diff] [blame] | 2346 | unsigned long flags; |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2347 | unsigned long gps; |
| 2348 | int i; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2349 | long n_launders; |
| 2350 | long n_launders_cb_snap; |
| 2351 | long n_launders_sa; |
| 2352 | long n_max_cbs; |
| 2353 | long n_max_gps; |
| 2354 | struct rcu_fwd_cb *rfcp; |
| 2355 | struct rcu_fwd_cb *rfcpn; |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2356 | unsigned long stopat; |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2357 | unsigned long stoppedat; |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2358 | |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2359 | if (READ_ONCE(rcu_fwd_emergency_stop)) |
| 2360 | return; /* Get out of the way quickly, no GP wait! */ |
Paul E. McKenney | c682db5 | 2019-04-19 07:38:27 -0700 | [diff] [blame] | 2361 | if (!cur_ops->call) |
| 2362 | return; /* Can't do call_rcu() fwd prog without ->call. */ |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2363 | |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2364 | /* Loop continuously posting RCU callbacks. */ |
| 2365 | WRITE_ONCE(rcu_fwd_cb_nodelay, true); |
| 2366 | cur_ops->sync(); /* Later readers see above write. */ |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2367 | WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); |
| 2368 | stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2369 | n_launders = 0; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2370 | rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2371 | n_launders_sa = 0; |
| 2372 | n_max_cbs = 0; |
| 2373 | n_max_gps = 0; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2374 | for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) |
| 2375 | rfp->n_launders_hist[i].n_launders = 0; |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2376 | cver = READ_ONCE(rcu_torture_current_version); |
| 2377 | gps = cur_ops->get_gp_seq(); |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2378 | rfp->rcu_launder_gp_seq_start = gps; |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 2379 | tick_dep_set_task(current, TICK_DEP_BIT_RCU); |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2380 | while (time_before(jiffies, stopat) && |
Paul E. McKenney | 60013d5 | 2019-07-10 08:30:00 -0700 | [diff] [blame] | 2381 | !shutdown_time_arrived() && |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2382 | !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { |
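		/*
		 * Re-post ("launder") the oldest already-invoked callback,
		 * provided that removing it does not empty the list (which
		 * would require tail-pointer fixup).  Otherwise, post a newly
		 * allocated callback, subject to any ->cbflood_max limit.
		 * Stop early once enough callbacks have been laundered enough
		 * times.
		 */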
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2383 | rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2384 | rfcpn = NULL; |
| 2385 | if (rfcp) |
| 2386 | rfcpn = READ_ONCE(rfcp->rfc_next); |
| 2387 | if (rfcpn) { |
| 2388 | if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && |
| 2389 | ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) |
| 2390 | break; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2391 | rfp->rcu_fwd_cb_head = rfcpn; |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2392 | n_launders++; |
| 2393 | n_launders_sa++; |
Paul E. McKenney | 613b00f | 2021-11-23 11:53:52 -0800 | [diff] [blame] | 2394 | } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) { |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2395 | rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); |
| 2396 | if (WARN_ON_ONCE(!rfcp)) { |
| 2397 | schedule_timeout_interruptible(1); |
| 2398 | continue; |
| 2399 | } |
| 2400 | n_max_cbs++; |
| 2401 | n_launders_sa = 0; |
| 2402 | rfcp->rfc_gps = 0; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2403 | rfcp->rfc_rfp = rfp; |
Paul E. McKenney | 613b00f | 2021-11-23 11:53:52 -0800 | [diff] [blame] | 2404 | } else { |
| 2405 | rfcp = NULL; |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2406 | } |
Paul E. McKenney | 613b00f | 2021-11-23 11:53:52 -0800 | [diff] [blame] | 2407 | if (rfcp) |
| 2408 | cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); |
Paul E. McKenney | bd1bfc5 | 2019-06-22 14:35:59 -0700 | [diff] [blame] | 2409 | rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); |
Paul E. McKenney | 79ba7ff | 2019-08-04 13:17:35 -0700 | [diff] [blame] | 2410 | if (tick_nohz_full_enabled()) { |
| 2411 | local_irq_save(flags); |
| 2412 | rcu_momentary_dyntick_idle(); |
| 2413 | local_irq_restore(flags); |
| 2414 | } |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2415 | } |
| 2416 | stoppedat = jiffies; |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2417 | n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2418 | cver = READ_ONCE(rcu_torture_current_version) - cver; |
| 2419 | gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); |
| 2420 | cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */ |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2421 | (void)rcu_torture_fwd_prog_cbfree(rfp); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2422 | |
Paul E. McKenney | 60013d5 | 2019-07-10 08:30:00 -0700 | [diff] [blame] | 2423 | if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && |
| 2424 | !shutdown_time_arrived()) { |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2425 | WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED); |
| 2426 | pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n", |
| 2427 | __func__, |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2428 | stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2429 | n_launders + n_max_cbs - n_launders_cb_snap, |
| 2430 | n_launders, n_launders_sa, |
| 2431 | n_max_gps, n_max_cbs, cver, gps); |
Paul E. McKenney | 53b541f | 2021-11-23 13:51:11 -0800 | [diff] [blame] | 2432 | atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs); |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2433 | rcu_torture_fwd_cb_hist(rfp); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2434 | } |
Paul E. McKenney | e8516c6 | 2019-04-09 11:06:32 -0700 | [diff] [blame] | 2435 | schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ |
Paul E. McKenney | d38e6dc | 2019-07-28 12:00:48 -0700 | [diff] [blame] | 2436 | tick_dep_clear_task(current, TICK_DEP_BIT_RCU); |
Paul E. McKenney | e8516c6 | 2019-04-09 11:06:32 -0700 | [diff] [blame] | 2437 | WRITE_ONCE(rcu_fwd_cb_nodelay, false); |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2438 | } |
| 2439 | |
| 2441 | /* |
| 2442 | * OOM notifier: print diagnostics for the current forward-progress test,
| 2443 | * set the emergency-stop flag, and free the pending flood callbacks.
| 2444 | */ |
| 2445 | static int rcutorture_oom_notify(struct notifier_block *self, |
| 2446 | unsigned long notused, void *nfreed) |
| 2447 | { |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2448 | int i; |
| 2449 | unsigned long ncbs;
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2450 | struct rcu_fwd *rfp; |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2451 | |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2452 | mutex_lock(&rcu_fwd_mutex); |
| 2453 | rfp = rcu_fwds; |
| 2454 | if (!rfp) { |
| 2455 | mutex_unlock(&rcu_fwd_mutex); |
| 2456 | return NOTIFY_OK; |
| 2457 | } |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2458 | WARN(1, "%s invoked upon OOM during forward-progress testing.\n", |
| 2459 | __func__); |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2460 | for (i = 0; i < fwd_progress; i++) { |
| 2461 | rcu_torture_fwd_cb_hist(&rfp[i]); |
| 2462 | rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2); |
| 2463 | } |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2464 | WRITE_ONCE(rcu_fwd_emergency_stop, true); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2465 | smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ |
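/*
 * Free the flood callbacks repeatedly, with an rcu_barrier() after each
 * pass: callbacks still queued within RCU can re-add themselves to the
 * list when invoked, so a single pass cannot reclaim everything.
 */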
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2466 | ncbs = 0; |
| 2467 | for (i = 0; i < fwd_progress; i++) |
| 2468 | ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); |
| 2469 | pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2470 | rcu_barrier(); |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2471 | ncbs = 0; |
| 2472 | for (i = 0; i < fwd_progress; i++) |
| 2473 | ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); |
| 2474 | pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2475 | rcu_barrier(); |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2476 | ncbs = 0; |
| 2477 | for (i = 0; i < fwd_progress; i++) |
| 2478 | ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); |
| 2479 | pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); |
Paul E. McKenney | 2667ccc | 2018-10-05 09:09:49 -0700 | [diff] [blame] | 2480 | smp_mb(); /* Frees before return to avoid redoing OOM. */ |
| 2481 | (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ |
| 2482 | pr_info("%s returning after OOM processing.\n", __func__); |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2483 | mutex_unlock(&rcu_fwd_mutex); |
Paul E. McKenney | e0aff97 | 2018-10-01 17:40:54 -0700 | [diff] [blame] | 2484 | return NOTIFY_OK; |
| 2485 | } |
| 2486 | |
| 2487 | static struct notifier_block rcutorture_oom_nb = { |
| 2488 | .notifier_call = rcutorture_oom_notify |
| 2489 | }; |
| 2490 | |
Paul E. McKenney | 6b3de7a | 2018-08-28 14:38:43 -0700 | [diff] [blame] | 2491 | /* Carry out grace-period forward-progress testing. */ |
| 2492 | static int rcu_torture_fwd_prog(void *args) |
| 2493 | { |
Paul E. McKenney | 53b541f | 2021-11-23 13:51:11 -0800 | [diff] [blame] | 2494 | bool firsttime = true; |
| 2495 | long max_cbs; |
Paul E. McKenney | ab1b788 | 2020-09-22 16:42:42 -0700 | [diff] [blame] | 2496 | int oldnice = task_nice(current); |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2497 | unsigned long oldseq = READ_ONCE(rcu_fwd_seq); |
Paul E. McKenney | 6b1b832 | 2019-11-05 09:08:58 -0800 | [diff] [blame] | 2498 | struct rcu_fwd *rfp = args; |
Paul E. McKenney | f4de46e | 2018-07-24 20:50:40 -0700 | [diff] [blame] | 2499 | int tested = 0; |
Paul E. McKenney | 152f4af | 2018-07-19 10:57:58 -0700 | [diff] [blame] | 2500 | int tested_tries = 0; |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2501 | |
| 2502 | VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); |
Paul E. McKenney | 5ab7ab8 | 2018-09-21 18:08:09 -0700 | [diff] [blame] | 2503 | rcu_bind_current_to_nocb(); |
Paul E. McKenney | fecad50 | 2018-07-20 12:18:11 -0700 | [diff] [blame] | 2504 | if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) |
| 2505 | set_user_nice(current, MAX_NICE); |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2506 | do { |
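/*
 * Kthread 0 acts as leader: it waits out the holdoff period, clears the
 * emergency-stop flag, reports the previous iteration's maximum callback
 * count, and advances rcu_fwd_seq.  The remaining kthreads simply wait
 * for rcu_fwd_seq to change before proceeding.
 */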
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2507 | if (!rfp->rcu_fwd_id) { |
| 2508 | schedule_timeout_interruptible(fwd_progress_holdoff * HZ); |
| 2509 | WRITE_ONCE(rcu_fwd_emergency_stop, false); |
Paul E. McKenney | 53b541f | 2021-11-23 13:51:11 -0800 | [diff] [blame] | 2510 | if (!firsttime) { |
| 2511 | max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0); |
| 2512 | pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs); |
| 2513 | } |
| 2514 | firsttime = false; |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2515 | WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1); |
| 2516 | } else { |
| 2517 | while (READ_ONCE(rcu_fwd_seq) == oldseq) |
| 2518 | schedule_timeout_interruptible(1); |
| 2519 | oldseq = READ_ONCE(rcu_fwd_seq); |
| 2520 | } |
| 2521 | pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); |
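/*
 * Run the callback-flood test only after in-kernel boot has completed and
 * only if this kthread's index corresponds to an online CPU; the
 * rcu_torture_fwd_prog_nr() test additionally requires that the flavor
 * under test report a nonzero RCU CPU stall timeout.
 */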
| 2522 | if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id) |
Paul E. McKenney | 4355080 | 2019-12-04 15:58:41 -0800 | [diff] [blame] | 2523 | rcu_torture_fwd_prog_cr(rfp); |
Paul E. McKenney | 613b00f | 2021-11-23 11:53:52 -0800 | [diff] [blame] | 2524 | if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) && |
| 2525 | (!IS_ENABLED(CONFIG_TINY_RCU) || |
| 2526 | (rcu_inkernel_boot_has_ended() && |
| 2527 | torture_num_online_cpus() > rfp->rcu_fwd_id))) |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2528 | rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); |
Paul E. McKenney | 4871848 | 2018-08-15 15:32:51 -0700 | [diff] [blame] | 2529 | |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2530 | /* Avoid slow periods, better to test when busy. */ |
Paul E. McKenney | ab1b788 | 2020-09-22 16:42:42 -0700 | [diff] [blame] | 2531 | if (stutter_wait("rcu_torture_fwd_prog")) |
| 2532 | sched_set_normal(current, oldnice); |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2533 | } while (!torture_must_stop()); |
Paul E. McKenney | 152f4af | 2018-07-19 10:57:58 -0700 | [diff] [blame] | 2534 | /* Short runs might not contain a valid forward-progress attempt. */ |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2535 | if (!rfp->rcu_fwd_id) { |
| 2536 | WARN_ON(!tested && tested_tries >= 5); |
| 2537 | pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); |
| 2538 | } |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2539 | torture_kthread_stopping("rcu_torture_fwd_prog"); |
| 2540 | return 0; |
| 2541 | } |
| 2542 | |
| 2543 | /* If forward-progress checking is requested and feasible, spawn the thread. */ |
| 2544 | static int __init rcu_torture_fwd_prog_init(void) |
| 2545 | { |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2546 | int i; |
| 2547 | int ret = 0; |
Paul E. McKenney | 5155be9 | 2019-11-06 08:35:08 -0800 | [diff] [blame] | 2548 | struct rcu_fwd *rfp; |
Paul E. McKenney | 6764100 | 2019-11-06 08:20:20 -0800 | [diff] [blame] | 2549 | |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2550 | if (!fwd_progress) |
| 2551 | return 0; /* Not requested, so don't do it. */ |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2552 | if (fwd_progress >= nr_cpu_ids) { |
| 2553 | VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.");
| 2554 | fwd_progress = nr_cpu_ids; |
| 2555 | } else if (fwd_progress < 0) { |
| 2556 | fwd_progress = nr_cpu_ids; |
| 2557 | } |
Paul E. McKenney | a7eb937 | 2020-10-09 19:51:55 -0700 | [diff] [blame] | 2558 | if ((!cur_ops->sync && !cur_ops->call) || |
Paul E. McKenney | 613b00f | 2021-11-23 11:53:52 -0800 | [diff] [blame] | 2559 | (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) || |
| 2560 | cur_ops == &rcu_busted_ops) { |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2561 | VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2562 | fwd_progress = 0; |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2563 | return 0; |
| 2564 | } |
| 2565 | if (stall_cpu > 0) { |
| 2566 | VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2567 | fwd_progress = 0; |
Zhouyi Zhou | 3ac8587 | 2021-07-26 05:43:33 +0800 | [diff] [blame] | 2568 | if (IS_MODULE(CONFIG_RCU_TORTURE_TEST)) |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2569 | return -EINVAL; /* In module, can fail back to user. */ |
| 2570 | WARN_ON(1); /* Make sure rcutorture notices conflict. */ |
| 2571 | return 0; |
| 2572 | } |
| 2573 | if (fwd_progress_holdoff <= 0) |
| 2574 | fwd_progress_holdoff = 1; |
| 2575 | if (fwd_progress_div <= 0) |
| 2576 | fwd_progress_div = 4; |
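/*
 * Illustrative usage (not part of this file): when rcutorture is built as
 * a module, these knobs are typically supplied at load time, for example
 * "modprobe rcutorture fwd_progress=2 fwd_progress_holdoff=60", or as
 * "rcutorture.fwd_progress=2" on the kernel command line when built in.
 */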
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2577 | rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL); |
| 2578 | fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL); |
| 2579 | if (!rfp || !fwd_prog_tasks) { |
| 2580 | kfree(rfp); |
| 2581 | kfree(fwd_prog_tasks); |
| 2582 | fwd_prog_tasks = NULL; |
| 2583 | fwd_progress = 0; |
Paul E. McKenney | 5155be9 | 2019-11-06 08:35:08 -0800 | [diff] [blame] | 2584 | return -ENOMEM; |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2585 | } |
| 2586 | for (i = 0; i < fwd_progress; i++) { |
| 2587 | spin_lock_init(&rfp[i].rcu_fwd_lock); |
| 2588 | rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head; |
| 2589 | rfp[i].rcu_fwd_id = i; |
| 2590 | } |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2591 | mutex_lock(&rcu_fwd_mutex); |
Paul E. McKenney | c8fa637 | 2020-07-19 14:40:31 -0700 | [diff] [blame] | 2592 | rcu_fwds = rfp; |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2593 | mutex_unlock(&rcu_fwd_mutex); |
Paul E. McKenney | 299c7d9 | 2020-07-22 10:45:12 -0700 | [diff] [blame] | 2594 | register_oom_notifier(&rcutorture_oom_nb); |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2595 | for (i = 0; i < fwd_progress; i++) { |
| 2596 | ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]); |
| 2597 | if (ret) { |
| 2598 | fwd_progress = i; |
| 2599 | return ret; |
| 2600 | } |
| 2601 | } |
| 2602 | return 0; |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 2603 | } |
| 2604 | |
Paul E. McKenney | c8fa637 | 2020-07-19 14:40:31 -0700 | [diff] [blame] | 2605 | static void rcu_torture_fwd_prog_cleanup(void) |
| 2606 | { |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2607 | int i; |
Paul E. McKenney | c8fa637 | 2020-07-19 14:40:31 -0700 | [diff] [blame] | 2608 | struct rcu_fwd *rfp; |
| 2609 | |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2610 | if (!rcu_fwds || !fwd_prog_tasks) |
| 2611 | return; |
| 2612 | for (i = 0; i < fwd_progress; i++) |
| 2613 | torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]); |
| 2614 | unregister_oom_notifier(&rcutorture_oom_nb); |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2615 | mutex_lock(&rcu_fwd_mutex); |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2616 | rfp = rcu_fwds; |
Paul E. McKenney | c8fa637 | 2020-07-19 14:40:31 -0700 | [diff] [blame] | 2617 | rcu_fwds = NULL; |
Paul E. McKenney | 57f6020 | 2020-07-20 08:34:07 -0700 | [diff] [blame] | 2618 | mutex_unlock(&rcu_fwd_mutex); |
Paul E. McKenney | c8fa637 | 2020-07-19 14:40:31 -0700 | [diff] [blame] | 2619 | kfree(rfp); |
Paul E. McKenney | 82e3100 | 2021-11-22 20:55:18 -0800 | [diff] [blame] | 2620 | kfree(fwd_prog_tasks); |
| 2621 | fwd_prog_tasks = NULL; |
Paul E. McKenney | c8fa637 | 2020-07-19 14:40:31 -0700 | [diff] [blame] | 2622 | } |
| 2623 | |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2624 | /* Callback function for RCU barrier testing. */ |
Rashika Kheria | b3b8a4d | 2014-02-27 17:16:57 +0530 | [diff] [blame] | 2625 | static void rcu_torture_barrier_cbf(struct rcu_head *rcu) |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2626 | { |
| 2627 | atomic_inc(&barrier_cbs_invoked); |
| 2628 | } |
| 2629 | |
Paul E. McKenney | 50d4b62 | 2020-02-04 15:00:56 -0800 | [diff] [blame] | 2630 | /* IPI handler to post a callback on the desired CPU, if it is online. */
| 2631 | static void rcu_torture_barrier1cb(void *rcu_void) |
| 2632 | { |
| 2633 | struct rcu_head *rhp = rcu_void; |
| 2634 | |
| 2635 | cur_ops->call(rhp, rcu_torture_barrier_cbf); |
| 2636 | } |
| 2637 | |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2638 | /* kthread function to register callbacks used to test RCU barriers. */ |
| 2639 | static int rcu_torture_barrier_cbs(void *arg) |
| 2640 | { |
| 2641 | long myid = (long)arg; |
Jules Irenge | 8f43d59 | 2020-06-01 19:45:48 +0100 | [diff] [blame] | 2642 | bool lastphase = false; |
Paul E. McKenney | 78e4bc3 | 2013-09-24 15:04:06 -0700 | [diff] [blame] | 2643 | bool newphase; |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2644 | struct rcu_head rcu; |
| 2645 | |
| 2646 | init_rcu_head_on_stack(&rcu); |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 2647 | VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); |
Linus Torvalds | 971eae7 | 2014-03-31 11:21:19 -0700 | [diff] [blame] | 2648 | set_user_nice(current, MAX_NICE); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2649 | do { |
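/*
 * Each pass waits for barrier_phase to flip, then posts this kthread's
 * callback from its own CPU (falling back to a direct call if the IPI
 * fails), so that the subsequent rcu_barrier() test must cope with
 * callbacks queued on every CPU.
 */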
| 2650 | wait_event(barrier_cbs_wq[myid], |
Paul E. McKenney | 78e4bc3 | 2013-09-24 15:04:06 -0700 | [diff] [blame] | 2651 | (newphase = |
Paul E. McKenney | 6c7ed42 | 2015-04-13 11:58:08 -0700 | [diff] [blame] | 2652 | smp_load_acquire(&barrier_phase)) != lastphase || |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 2653 | torture_must_stop()); |
Paul E. McKenney | 78e4bc3 | 2013-09-24 15:04:06 -0700 | [diff] [blame] | 2654 | lastphase = newphase; |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 2655 | if (torture_must_stop()) |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2656 | break; |
Paul E. McKenney | 6c7ed42 | 2015-04-13 11:58:08 -0700 | [diff] [blame] | 2657 | /* |
| 2658 | * The above smp_load_acquire() ensures barrier_phase load |
Paul E. McKenney | aab0573 | 2016-05-02 12:20:51 -0700 | [diff] [blame] | 2659 | * is ordered before the following ->call(). |
Paul E. McKenney | 6c7ed42 | 2015-04-13 11:58:08 -0700 | [diff] [blame] | 2660 | */ |
Paul E. McKenney | 50d4b62 | 2020-02-04 15:00:56 -0800 | [diff] [blame] | 2661 | if (smp_call_function_single(myid, rcu_torture_barrier1cb, |
| 2662 | &rcu, 1)) { |
| 2663 | // IPI failed, so use direct call from current CPU. |
| 2664 | cur_ops->call(&rcu, rcu_torture_barrier_cbf); |
| 2665 | } |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2666 | if (atomic_dec_and_test(&barrier_cbs_count)) |
| 2667 | wake_up(&barrier_wq); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 2668 | } while (!torture_must_stop()); |
Paul E. McKenney | 69c6045 | 2014-07-01 11:59:36 -0700 | [diff] [blame] | 2669 | if (cur_ops->cb_barrier != NULL) |
| 2670 | cur_ops->cb_barrier(); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2671 | destroy_rcu_head_on_stack(&rcu); |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 2672 | torture_kthread_stopping("rcu_torture_barrier_cbs"); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2673 | return 0; |
| 2674 | } |
| 2675 | |
| 2676 | /* kthread function to drive and coordinate RCU barrier testing. */ |
| 2677 | static int rcu_torture_barrier(void *arg) |
| 2678 | { |
| 2679 | int i; |
| 2680 | |
Paul E. McKenney | 5ccf60f | 2014-01-29 07:25:25 -0800 | [diff] [blame] | 2681 | VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2682 | do { |
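/*
 * Each pass resets the counters, flips barrier_phase to release the
 * callback-posting kthreads, waits for all of them to post, invokes
 * ->cb_barrier(), and then verifies that every callback was invoked.
 */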
| 2683 | atomic_set(&barrier_cbs_invoked, 0); |
| 2684 | atomic_set(&barrier_cbs_count, n_barrier_cbs); |
Paul E. McKenney | 6c7ed42 | 2015-04-13 11:58:08 -0700 | [diff] [blame] | 2685 | /* Ensure barrier_phase ordered after prior assignments. */ |
| 2686 | smp_store_release(&barrier_phase, !barrier_phase); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2687 | for (i = 0; i < n_barrier_cbs; i++) |
| 2688 | wake_up(&barrier_cbs_wq[i]); |
| 2689 | wait_event(barrier_wq, |
| 2690 | atomic_read(&barrier_cbs_count) == 0 || |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 2691 | torture_must_stop()); |
| 2692 | if (torture_must_stop()) |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2693 | break; |
| 2694 | n_barrier_attempts++; |
Paul E. McKenney | 78e4bc3 | 2013-09-24 15:04:06 -0700 | [diff] [blame] | 2695 | cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2696 | if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { |
| 2697 | n_rcu_torture_barrier_error++; |
Paul E. McKenney | 7602de4a | 2014-12-17 18:39:54 -0800 | [diff] [blame] | 2698 | pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", |
| 2699 | atomic_read(&barrier_cbs_invoked), |
| 2700 | n_barrier_cbs); |
Paul E. McKenney | 9470a18 | 2020-02-05 12:54:34 -0800 | [diff] [blame] | 2701 | WARN_ON(1); |
| 2702 | // Wait manually for the remaining callbacks |
| 2703 | i = 0; |
| 2704 | do { |
| 2705 | if (WARN_ON(i++ > HZ)) |
| 2706 | i = INT_MIN; |
| 2707 | schedule_timeout_interruptible(1); |
| 2708 | cur_ops->cb_barrier(); |
| 2709 | } while (atomic_read(&barrier_cbs_invoked) != |
| 2710 | n_barrier_cbs && |
| 2711 | !torture_must_stop()); |
| 2712 | smp_mb(); // Can't trust ordering if broken. |
| 2713 | if (!torture_must_stop()) |
| 2714 | pr_err("Recovered: barrier_cbs_invoked = %d\n", |
| 2715 | atomic_read(&barrier_cbs_invoked)); |
Joel Fernandes (Google) | bf5b643 | 2018-06-19 15:14:19 -0700 | [diff] [blame] | 2716 | } else { |
| 2717 | n_barrier_successes++; |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2718 | } |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2719 | schedule_timeout_interruptible(HZ / 10); |
Paul E. McKenney | 36970bb | 2014-01-30 15:49:29 -0800 | [diff] [blame] | 2720 | } while (!torture_must_stop()); |
Paul E. McKenney | 7fafaac | 2014-01-31 17:37:28 -0800 | [diff] [blame] | 2721 | torture_kthread_stopping("rcu_torture_barrier"); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2722 | return 0; |
| 2723 | } |
| 2724 | |
| 2725 | /* Initialize RCU barrier testing. */ |
| 2726 | static int rcu_torture_barrier_init(void) |
| 2727 | { |
| 2728 | int i; |
| 2729 | int ret; |
| 2730 | |
Paul E. McKenney | d9eba768 | 2015-05-14 15:35:43 -0700 | [diff] [blame] | 2731 | if (n_barrier_cbs <= 0) |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2732 | return 0; |
| 2733 | if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { |
Paul E. McKenney | 2caa1e4 | 2012-08-09 16:30:45 -0700 | [diff] [blame] | 2734 | pr_alert("%s" TORTURE_FLAG |
| 2735 | " Call or barrier ops missing for %s,\n", |
| 2736 | torture_type, cur_ops->name); |
| 2737 | pr_alert("%s" TORTURE_FLAG |
| 2738 | " RCU barrier testing omitted from run.\n", |
| 2739 | torture_type); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2740 | return 0; |
| 2741 | } |
| 2742 | atomic_set(&barrier_cbs_count, 0); |
| 2743 | atomic_set(&barrier_cbs_invoked, 0); |
| 2744 | barrier_cbs_tasks = |
Paul E. McKenney | 68a675d | 2017-12-01 14:26:56 -0800 | [diff] [blame] | 2745 | kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2746 | GFP_KERNEL); |
| 2747 | barrier_cbs_wq = |
Paul E. McKenney | 68a675d | 2017-12-01 14:26:56 -0800 | [diff] [blame] | 2748 | kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); |
Sasha Levin | de5e643 | 2012-12-20 14:11:28 -0500 | [diff] [blame] | 2749 | if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2750 | return -ENOMEM; |
| 2751 | for (i = 0; i < n_barrier_cbs; i++) { |
| 2752 | init_waitqueue_head(&barrier_cbs_wq[i]); |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 2753 | ret = torture_create_kthread(rcu_torture_barrier_cbs, |
| 2754 | (void *)(long)i, |
| 2755 | barrier_cbs_tasks[i]); |
| 2756 | if (ret) |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2757 | return ret; |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2758 | } |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 2759 | return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2760 | } |
| 2761 | |
| 2762 | /* Clean up after RCU barrier testing. */ |
| 2763 | static void rcu_torture_barrier_cleanup(void) |
| 2764 | { |
| 2765 | int i; |
| 2766 | |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 2767 | torture_stop_kthread(rcu_torture_barrier, barrier_task); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2768 | if (barrier_cbs_tasks != NULL) { |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 2769 | for (i = 0; i < n_barrier_cbs; i++) |
| 2770 | torture_stop_kthread(rcu_torture_barrier_cbs, |
| 2771 | barrier_cbs_tasks[i]); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2772 | kfree(barrier_cbs_tasks); |
| 2773 | barrier_cbs_tasks = NULL; |
| 2774 | } |
| 2775 | if (barrier_cbs_wq != NULL) { |
| 2776 | kfree(barrier_cbs_wq); |
| 2777 | barrier_cbs_wq = NULL; |
| 2778 | } |
| 2779 | } |
| 2780 | |
Joel Fernandes (Google) | 4babd85 | 2018-06-19 15:14:18 -0700 | [diff] [blame] | 2781 | static bool rcu_torture_can_boost(void) |
| 2782 | { |
| 2783 | static int boost_warn_once; |
| 2784 | int prio; |
| 2785 | |
| 2786 | if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) |
| 2787 | return false; |
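/* The boost test uses the polled grace-period API, so both hooks are required. */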
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 2788 | if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state) |
Paul E. McKenney | 5e59fba | 2021-01-15 13:30:38 -0800 | [diff] [blame] | 2789 | return false; |
Joel Fernandes (Google) | 4babd85 | 2018-06-19 15:14:18 -0700 | [diff] [blame] | 2790 | |
| 2791 | prio = rcu_get_gp_kthreads_prio(); |
| 2792 | if (!prio) |
| 2793 | return false; |
| 2794 | |
| 2795 | if (prio < 2) { |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 2796 | if (boost_warn_once == 1) |
Joel Fernandes (Google) | 4babd85 | 2018-06-19 15:14:18 -0700 | [diff] [blame] | 2797 | return false; |
| 2798 | |
Joel Fernandes (Google) | bf5b643 | 2018-06-19 15:14:19 -0700 | [diff] [blame] | 2799 | pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); |
Joel Fernandes (Google) | 4babd85 | 2018-06-19 15:14:18 -0700 | [diff] [blame] | 2800 | boost_warn_once = 1; |
| 2801 | return false; |
| 2802 | } |
| 2803 | |
| 2804 | return true; |
| 2805 | } |
| 2806 | |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 2807 | static bool read_exit_child_stop; |
| 2808 | static bool read_exit_child_stopped; |
| 2809 | static wait_queue_head_t read_exit_wq; |
| 2810 | |
| 2811 | // Child kthread which runs a single rcutorture reader and then exits.
| 2812 | static int rcu_torture_read_exit_child(void *trsp_in) |
| 2813 | { |
| 2814 | struct torture_random_state *trsp = trsp_in; |
| 2815 | |
| 2816 | set_user_nice(current, MAX_NICE); |
| 2817 | // Minimize time between reading and exiting. |
| 2818 | while (!kthread_should_stop()) |
| 2819 | schedule_timeout_uninterruptible(1); |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 2820 | (void)rcu_torture_one_read(trsp, -1); |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 2821 | return 0; |
| 2822 | } |
| 2823 | |
| 2824 | // Parent kthread which creates and destroys read-exit child kthreads. |
| 2825 | static int rcu_torture_read_exit(void *unused) |
| 2826 | { |
| 2827 | int count = 0; |
| 2828 | bool errexit = false; |
| 2829 | int i; |
| 2830 | struct task_struct *tsp; |
| 2831 | DEFINE_TORTURE_RANDOM(trs); |
| 2832 | |
| 2833 | // Allocate and initialize. |
| 2834 | set_user_nice(current, MAX_NICE); |
| 2835 | VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); |
| 2836 | |
| 2837 | // Each pass through this loop does one read-exit episode. |
| 2838 | do { |
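/*
 * Each episode spawns and reaps read_exit_burst child kthreads in quick
 * succession, then sleeps for read_exit_delay seconds (checking for a
 * stop request each second) before starting the next episode.
 */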
| 2839 | if (++count > read_exit_burst) { |
| 2840 | VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); |
| 2841 | rcu_barrier(); // Wait for task_struct free, avoid OOM. |
| 2842 | for (i = 0; i < read_exit_delay; i++) { |
| 2843 | schedule_timeout_uninterruptible(HZ); |
| 2844 | if (READ_ONCE(read_exit_child_stop)) |
| 2845 | break; |
| 2846 | } |
| 2847 | if (!READ_ONCE(read_exit_child_stop)) |
| 2848 | VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); |
| 2849 | count = 0; |
| 2850 | } |
| 2851 | if (READ_ONCE(read_exit_child_stop)) |
| 2852 | break; |
| 2853 | // Spawn child. |
| 2854 | tsp = kthread_run(rcu_torture_read_exit_child, |
| 2855 | &trs, "%s", |
| 2856 | "rcu_torture_read_exit_child"); |
| 2857 | if (IS_ERR(tsp)) { |
Li Zhijian | 81faa4f | 2021-11-03 16:30:28 +0800 | [diff] [blame] | 2858 | TOROUT_ERRSTRING("out of memory"); |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 2859 | errexit = true; |
| 2860 | tsp = NULL; |
| 2861 | break; |
| 2862 | } |
| 2863 | cond_resched(); |
| 2864 | kthread_stop(tsp); |
| 2865 | n_read_exits++;
| 2866 | stutter_wait("rcu_torture_read_exit"); |
| 2867 | } while (!errexit && !READ_ONCE(read_exit_child_stop)); |
| 2868 | |
| 2869 | // Clean up and exit. |
| 2870 | smp_store_release(&read_exit_child_stopped, true); // After reaping. |
| 2871 | smp_mb(); // Store before wakeup. |
| 2872 | wake_up(&read_exit_wq); |
| 2873 | while (!torture_must_stop()) |
| 2874 | schedule_timeout_uninterruptible(1); |
| 2875 | torture_kthread_stopping("rcu_torture_read_exit"); |
| 2876 | return 0; |
| 2877 | } |
| 2878 | |
| 2879 | static int rcu_torture_read_exit_init(void) |
| 2880 | { |
| 2881 | if (read_exit_burst <= 0) |
Paul E. McKenney | fda8486 | 2021-08-03 17:42:25 -0700 | [diff] [blame] | 2882 | return 0; |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 2883 | init_waitqueue_head(&read_exit_wq); |
| 2884 | read_exit_child_stop = false; |
| 2885 | read_exit_child_stopped = false; |
| 2886 | return torture_create_kthread(rcu_torture_read_exit, NULL, |
| 2887 | read_exit_task); |
| 2888 | } |
| 2889 | |
| 2890 | static void rcu_torture_read_exit_cleanup(void) |
| 2891 | { |
| 2892 | if (!read_exit_task) |
| 2893 | return; |
| 2894 | WRITE_ONCE(read_exit_child_stop, true); |
| 2895 | smp_mb(); // Above write before wait. |
| 2896 | wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped)); |
| 2897 | torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
| 2898 | } |
| 2899 | |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 2900 | static enum cpuhp_state rcutor_hp; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 2901 | |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2902 | static void |
| 2903 | rcu_torture_cleanup(void) |
| 2904 | { |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 2905 | int firsttime; |
Paul E. McKenney | 034777d | 2018-04-19 08:43:11 -0700 | [diff] [blame] | 2906 | int flags = 0; |
Paul E. McKenney | aebc826 | 2018-05-01 06:42:51 -0700 | [diff] [blame] | 2907 | unsigned long gp_seq = 0; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2908 | int i; |
| 2909 | |
Davidlohr Bueso | d36a7a0 | 2014-09-11 20:40:21 -0700 | [diff] [blame] | 2910 | if (torture_cleanup_begin()) { |
Paul E. McKenney | 343e909 | 2008-12-15 16:13:07 -0800 | [diff] [blame] | 2911 | if (cur_ops->cb_barrier != NULL) |
| 2912 | cur_ops->cb_barrier(); |
| 2913 | return; |
| 2914 | } |
Paul E. McKenney | b813afa | 2019-03-21 09:27:28 -0700 | [diff] [blame] | 2915 | if (!cur_ops) { |
| 2916 | torture_cleanup_end(); |
| 2917 | return; |
| 2918 | } |
Paul E. McKenney | 3808dc9 | 2014-01-28 15:29:21 -0800 | [diff] [blame] | 2919 | |
Paul E. McKenney | 27c0f14 | 2020-09-15 17:08:03 -0700 | [diff] [blame] | 2920 | if (cur_ops->gp_kthread_dbg) |
| 2921 | cur_ops->gp_kthread_dbg(); |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 2922 | rcu_torture_read_exit_cleanup(); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 2923 | rcu_torture_barrier_cleanup(); |
Paul E. McKenney | c8fa637 | 2020-07-19 14:40:31 -0700 | [diff] [blame] | 2924 | rcu_torture_fwd_prog_cleanup(); |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 2925 | torture_stop_kthread(rcu_torture_stall, stall_task); |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 2926 | torture_stop_kthread(rcu_torture_writer, writer_task); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2927 | |
Paul E. McKenney | 2c4319b | 2020-09-23 17:39:46 -0700 | [diff] [blame] | 2928 | if (nocb_tasks) { |
| 2929 | for (i = 0; i < nrealnocbers; i++) |
| 2930 | torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]); |
| 2931 | kfree(nocb_tasks); |
| 2932 | nocb_tasks = NULL; |
| 2933 | } |
| 2934 | |
Josh Triplett | c8e5b16 | 2007-05-08 00:33:20 -0700 | [diff] [blame] | 2935 | if (reader_tasks) { |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 2936 | for (i = 0; i < nrealreaders; i++) |
| 2937 | torture_stop_kthread(rcu_torture_reader, |
| 2938 | reader_tasks[i]); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2939 | kfree(reader_tasks); |
Paul E. McKenney | 293b93d | 2020-09-23 16:46:36 -0700 | [diff] [blame] | 2940 | reader_tasks = NULL; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2941 | } |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 2942 | kfree(rcu_torture_reader_mbchk); |
| 2943 | rcu_torture_reader_mbchk = NULL; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2944 | |
Josh Triplett | c8e5b16 | 2007-05-08 00:33:20 -0700 | [diff] [blame] | 2945 | if (fakewriter_tasks) { |
Paul E. McKenney | 293b93d | 2020-09-23 16:46:36 -0700 | [diff] [blame] | 2946 | for (i = 0; i < nfakewriters; i++) |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 2947 | torture_stop_kthread(rcu_torture_fakewriter, |
| 2948 | fakewriter_tasks[i]); |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 2949 | kfree(fakewriter_tasks); |
| 2950 | fakewriter_tasks = NULL; |
| 2951 | } |
| 2952 | |
Paul E. McKenney | aebc826 | 2018-05-01 06:42:51 -0700 | [diff] [blame] | 2953 | rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); |
| 2954 | srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); |
Joel Fernandes (Google) | 959954d | 2020-06-18 16:29:55 -0400 | [diff] [blame] | 2955 | pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n", |
| 2956 | cur_ops->name, (long)gp_seq, flags, |
| 2957 | rcutorture_seq_diff(gp_seq, start_gp_seq)); |
Paul E. McKenney | 9c029b8 | 2014-02-04 11:47:08 -0800 | [diff] [blame] | 2958 | torture_stop_kthread(rcu_torture_stats, stats_task); |
| 2959 | torture_stop_kthread(rcu_torture_fqs, fqs_task); |
Paul E. McKenney | fd13fe1 | 2021-08-06 08:57:26 -0700 | [diff] [blame] | 2960 | if (rcu_torture_can_boost() && rcutor_hp >= 0) |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 2961 | cpuhp_remove_state(rcutor_hp); |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 2962 | |
Paul E. McKenney | ca1d51e | 2015-04-14 12:28:22 -0700 | [diff] [blame] | 2963 | /* |
Paul E. McKenney | 62a1a94 | 2018-07-07 18:12:26 -0700 | [diff] [blame] | 2964 | * Wait for all RCU callbacks to fire, then do torture-type-specific |
Paul E. McKenney | ca1d51e | 2015-04-14 12:28:22 -0700 | [diff] [blame] | 2965 | * cleanup operations. |
| 2966 | */ |
Paul E. McKenney | 2326974 | 2008-05-12 21:21:05 +0200 | [diff] [blame] | 2967 | if (cur_ops->cb_barrier != NULL) |
| 2968 | cur_ops->cb_barrier(); |
Paul E. McKenney | ca1d51e | 2015-04-14 12:28:22 -0700 | [diff] [blame] | 2969 | if (cur_ops->cleanup != NULL) |
| 2970 | cur_ops->cleanup(); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2971 | |
Paul E. McKenney | 7ab2bd3 | 2021-05-02 19:56:05 -0700 | [diff] [blame] | 2972 | rcu_torture_mem_dump_obj(); |
| 2973 | |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 2974 | rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 2975 | |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 2976 | if (err_segs_recorded) { |
| 2977 | pr_alert("Failure/close-call rcutorture reader segments:\n"); |
| 2978 | if (rt_read_nsegs == 0) |
| 2979 | pr_alert("\t: No segments recorded!!!\n"); |
| 2980 | firsttime = 1; |
| 2981 | for (i = 0; i < rt_read_nsegs; i++) { |
| 2982 | pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate); |
| 2983 | if (err_segs[i].rt_delay_jiffies != 0) { |
| 2984 | pr_cont("%s%ldjiffies", firsttime ? "" : "+", |
| 2985 | err_segs[i].rt_delay_jiffies); |
| 2986 | firsttime = 0; |
| 2987 | } |
| 2988 | if (err_segs[i].rt_delay_ms != 0) { |
| 2989 | pr_cont("%s%ldms", firsttime ? "" : "+", |
| 2990 | err_segs[i].rt_delay_ms); |
| 2991 | firsttime = 0; |
| 2992 | } |
| 2993 | if (err_segs[i].rt_delay_us != 0) { |
| 2994 | pr_cont("%s%ldus", firsttime ? "" : "+", |
| 2995 | err_segs[i].rt_delay_us); |
| 2996 | firsttime = 0; |
| 2997 | } |
| 2998 | pr_cont("%s\n", |
| 2999 | err_segs[i].rt_preempted ? "preempted" : ""); |
| 3000 | |
| 3001 | } |
| 3002 | } |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 3003 | if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 3004 | rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); |
Paul E. McKenney | 2e9e808 | 2014-01-28 15:58:22 -0800 | [diff] [blame] | 3005 | else if (torture_onoff_failures()) |
Paul E. McKenney | 091541b | 2012-01-10 12:51:14 -0800 | [diff] [blame] | 3006 | rcu_torture_print_module_parms(cur_ops, |
| 3007 | "End of test: RCU_HOTPLUG"); |
Paul E. McKenney | 95c3832 | 2006-03-24 03:15:58 -0800 | [diff] [blame] | 3008 | else |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 3009 | rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); |
Davidlohr Bueso | d36a7a0 | 2014-09-11 20:40:21 -0700 | [diff] [blame] | 3010 | torture_cleanup_end(); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3011 | } |
| 3012 | |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 3013 | #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD |
| 3014 | static void rcu_torture_leak_cb(struct rcu_head *rhp) |
| 3015 | { |
| 3016 | } |
| 3017 | |
| 3018 | static void rcu_torture_err_cb(struct rcu_head *rhp) |
| 3019 | { |
| 3020 | /* |
| 3021 | * This -might- happen due to race conditions, but is unlikely. |
| 3022 | * The scenario that leads to this happening is that the |
| 3023 | * first of the pair of duplicate callbacks is queued, |
| 3024 | * someone else starts a grace period that includes that |
| 3025 | * callback, then the second of the pair must wait for the |
| 3026 | * next grace period. Unlikely, but can happen. If it |
| 3027 | * does happen, the debug-objects subsystem won't have splatted. |
| 3028 | */ |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 3029 | pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME); |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 3030 | } |
| 3031 | #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
| 3032 | |
| 3033 | /* |
| 3034 | * Verify that double-free causes debug-objects to complain, but only |
| 3035 | * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test |
| 3036 | * cannot be carried out. |
| 3037 | */ |
| 3038 | static void rcu_test_debug_objects(void) |
| 3039 | { |
| 3040 | #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD |
| 3041 | struct rcu_head rh1; |
| 3042 | struct rcu_head rh2; |
Paul E. McKenney | edf7b84 | 2020-12-02 17:52:07 -0800 | [diff] [blame] | 3043 | struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 3044 | |
| 3045 | init_rcu_head_on_stack(&rh1); |
| 3046 | init_rcu_head_on_stack(&rh2); |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 3047 | pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME); |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 3048 | |
| 3049 | /* Try to queue the rh2 pair of callbacks for the same grace period. */ |
| 3050 | preempt_disable(); /* Prevent preemption from interrupting test. */ |
| 3051 | rcu_read_lock(); /* Make it impossible to finish a grace period. */ |
| 3052 | call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */ |
| 3053 | local_irq_disable(); /* Make it harder to start a new grace period. */ |
| 3054 | call_rcu(&rh2, rcu_torture_leak_cb); |
| 3055 | call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ |
Paul E. McKenney | edf7b84 | 2020-12-02 17:52:07 -0800 | [diff] [blame] | 3056 | if (rhp) { |
| 3057 | call_rcu(rhp, rcu_torture_leak_cb); |
| 3058 | call_rcu(rhp, rcu_torture_err_cb); /* Another duplicate callback. */ |
| 3059 | } |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 3060 | local_irq_enable(); |
| 3061 | rcu_read_unlock(); |
| 3062 | preempt_enable(); |
| 3063 | |
| 3064 | /* Wait for them all to get done so we can safely return. */ |
| 3065 | rcu_barrier(); |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 3066 | pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME); |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 3067 | destroy_rcu_head_on_stack(&rh1); |
| 3068 | destroy_rcu_head_on_stack(&rh2); |
| 3069 | #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
Paul E. McKenney | e0d31a34c | 2017-12-01 15:22:38 -0800 | [diff] [blame] | 3070 | pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME); |
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 3071 | #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
| 3072 | } |
| 3073 | |
Paul E. McKenney | 3a6cb58 | 2018-12-10 09:44:52 -0800 | [diff] [blame] | 3074 | static void rcutorture_sync(void) |
| 3075 | { |
| 3076 | static unsigned long n; |
| 3077 | |
| 3078 | if (cur_ops->sync && !(++n & 0xfff)) |
| 3079 | cur_ops->sync(); |
| 3080 | } |
| 3081 | |
Josh Triplett | 6f8bc500 | 2007-05-08 00:25:24 -0700 | [diff] [blame] | 3082 | static int __init |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3083 | rcu_torture_init(void) |
| 3084 | { |
Paul E. McKenney | c04dd09 | 2018-07-23 14:16:47 -0700 | [diff] [blame] | 3085 | long i; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3086 | int cpu; |
| 3087 | int firsterr = 0; |
Joel Fernandes (Google) | 959954d | 2020-06-18 16:29:55 -0400 | [diff] [blame] | 3088 | int flags = 0; |
| 3089 | unsigned long gp_seq = 0; |
Paul E. McKenney | 2ec1f2d | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 3090 | static struct rcu_torture_ops *torture_ops[] = { |
Paul E. McKenney | c770c82 | 2018-07-07 10:28:07 -0700 | [diff] [blame] | 3091 | &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, |
Paul E. McKenney | c1a76c0 | 2020-03-10 10:32:30 -0700 | [diff] [blame] | 3092 | &busted_srcud_ops, &tasks_ops, &tasks_rude_ops, |
| 3093 | &tasks_tracing_ops, &trivial_ops, |
Paul E. McKenney | 2ec1f2d | 2013-06-12 15:12:21 -0700 | [diff] [blame] | 3094 | }; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3095 | |
Paul E. McKenney | a2f2577 | 2017-11-21 20:19:17 -0800 | [diff] [blame] | 3096 | if (!torture_init_begin(torture_type, verbose)) |
Paul E. McKenney | 5228084 | 2014-04-07 09:14:11 -0700 | [diff] [blame] | 3097 | return -EBUSY; |
Paul E. McKenney | 343e909 | 2008-12-15 16:13:07 -0800 | [diff] [blame] | 3098 | |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3099 | /* Process args and tell the world that the torturer is on the job. */ |
Josh Triplett | ade5fb8 | 2007-05-08 00:33:22 -0700 | [diff] [blame] | 3100 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 3101 | cur_ops = torture_ops[i]; |
Josh Triplett | ade5fb8 | 2007-05-08 00:33:22 -0700 | [diff] [blame] | 3102 | if (strcmp(torture_type, cur_ops->name) == 0) |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 3103 | break; |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 3104 | } |
Josh Triplett | ade5fb8 | 2007-05-08 00:33:22 -0700 | [diff] [blame] | 3105 | if (i == ARRAY_SIZE(torture_ops)) { |
Paul E. McKenney | 2caa1e4 | 2012-08-09 16:30:45 -0700 | [diff] [blame] | 3106 | pr_alert("rcu-torture: invalid torture type: \"%s\"\n", |
| 3107 | torture_type); |
| 3108 | pr_alert("rcu-torture types:"); |
Paul E. McKenney | cf886c4 | 2009-10-25 19:03:54 -0700 | [diff] [blame] | 3109 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) |
Joe Perches | a753835 | 2018-05-14 13:27:33 -0700 | [diff] [blame] | 3110 | pr_cont(" %s", torture_ops[i]->name); |
| 3111 | pr_cont("\n"); |
Paul E. McKenney | 889d487 | 2015-08-24 11:37:58 -0700 | [diff] [blame] | 3112 | firsterr = -EINVAL; |
Paul E. McKenney | b813afa | 2019-03-21 09:27:28 -0700 | [diff] [blame] | 3113 | cur_ops = NULL; |
Paul E. McKenney | 889d487 | 2015-08-24 11:37:58 -0700 | [diff] [blame] | 3114 | goto unwind; |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 3115 | } |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 3116 | if (cur_ops->fqs == NULL && fqs_duration != 0) { |
Paul E. McKenney | 2caa1e4 | 2012-08-09 16:30:45 -0700 | [diff] [blame] | 3117 | pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 3118 | fqs_duration = 0; |
| 3119 | } |
Josh Triplett | c8e5b16 | 2007-05-08 00:33:20 -0700 | [diff] [blame] | 3120 | if (cur_ops->init) |
Paul E. McKenney | 889d487 | 2015-08-24 11:37:58 -0700 | [diff] [blame] | 3121 | cur_ops->init(); |
Paul E. McKenney | 72e9bb5 | 2006-06-27 02:54:03 -0700 | [diff] [blame] | 3122 | |
Paul E. McKenney | 64e4b43 | 2014-03-12 10:26:35 -0700 | [diff] [blame] | 3123 | if (nreaders >= 0) { |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3124 | nrealreaders = nreaders; |
Paul E. McKenney | 64e4b43 | 2014-03-12 10:26:35 -0700 | [diff] [blame] | 3125 | } else { |
Paul E. McKenney | 3838cc1 | 2015-03-12 13:55:48 -0700 | [diff] [blame] | 3126 | nrealreaders = num_online_cpus() - 2 - nreaders; |
Paul E. McKenney | 64e4b43 | 2014-03-12 10:26:35 -0700 | [diff] [blame] | 3127 | if (nrealreaders <= 0) |
| 3128 | nrealreaders = 1; |
| 3129 | } |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 3130 | rcu_torture_print_module_parms(cur_ops, "Start of test"); |
Joel Fernandes (Google) | 959954d | 2020-06-18 16:29:55 -0400 | [diff] [blame] | 3131 | rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); |
| 3132 | srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); |
| 3133 | start_gp_seq = gp_seq; |
| 3134 | pr_alert("%s: Start-test grace-period state: g%ld f%#x\n", |
| 3135 | cur_ops->name, (long)gp_seq, flags); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3136 | |
| 3137 | /* Set up the freelist. */ |
| 3138 | |
| 3139 | INIT_LIST_HEAD(&rcu_torture_freelist); |
Ahmed S. Darwish | 788e770 | 2007-05-08 00:33:14 -0700 | [diff] [blame] | 3140 | for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { |
Paul E. McKenney | 996417d | 2005-11-18 01:10:50 -0800 | [diff] [blame] | 3141 | rcu_tortures[i].rtort_mbtest = 0; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3142 | list_add_tail(&rcu_tortures[i].rtort_free, |
| 3143 | &rcu_torture_freelist); |
| 3144 | } |
| 3145 | |
| 3146 | /* Initialize the statistics so that each run gets its own numbers. */ |
| 3147 | |
| 3148 | rcu_torture_current = NULL; |
| 3149 | rcu_torture_current_version = 0; |
| 3150 | atomic_set(&n_rcu_torture_alloc, 0); |
| 3151 | atomic_set(&n_rcu_torture_alloc_fail, 0); |
| 3152 | atomic_set(&n_rcu_torture_free, 0); |
Paul E. McKenney | 996417d | 2005-11-18 01:10:50 -0800 | [diff] [blame] | 3153 | atomic_set(&n_rcu_torture_mberror, 0); |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 3154 | atomic_set(&n_rcu_torture_mbchk_fail, 0); |
| 3155 | atomic_set(&n_rcu_torture_mbchk_tries, 0); |
Paul E. McKenney | 996417d | 2005-11-18 01:10:50 -0800 | [diff] [blame] | 3156 | atomic_set(&n_rcu_torture_error, 0); |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 3157 | n_rcu_torture_barrier_error = 0; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 3158 | n_rcu_torture_boost_ktrerror = 0; |
| 3159 | n_rcu_torture_boost_rterror = 0; |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 3160 | n_rcu_torture_boost_failure = 0; |
| 3161 | n_rcu_torture_boosts = 0; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3162 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) |
| 3163 | atomic_set(&rcu_torture_wcount[i], 0); |
KAMEZAWA Hiroyuki | 0a94502 | 2006-03-28 01:56:37 -0800 | [diff] [blame] | 3164 | for_each_possible_cpu(cpu) { |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3165 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { |
| 3166 | per_cpu(rcu_torture_count, cpu)[i] = 0; |
| 3167 | per_cpu(rcu_torture_batch, cpu)[i] = 0; |
| 3168 | } |
| 3169 | } |
Paul E. McKenney | c116dba | 2018-07-13 12:09:14 -0700 | [diff] [blame] | 3170 | err_segs_recorded = 0; |
| 3171 | rt_read_nsegs = 0; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3172 | |
| 3173 | /* Start up the kthreads. */ |
| 3174 | |
Paul E. McKenney | 18fbf30 | 2020-11-16 16:46:06 -0800 | [diff] [blame] | 3175 | rcu_torture_write_types(); |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 3176 | firsterr = torture_create_kthread(rcu_torture_writer, NULL, |
| 3177 | writer_task); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3178 | if (torture_init_error(firsterr)) |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3179 | goto unwind; |
Paul E. McKenney | 4444d85 | 2015-05-14 15:42:40 -0700 | [diff] [blame] | 3180 | if (nfakewriters > 0) { |
Paul E. McKenney | 68a675d | 2017-12-01 14:26:56 -0800 | [diff] [blame] | 3181 | fakewriter_tasks = kcalloc(nfakewriters, |
Paul E. McKenney | 4444d85 | 2015-05-14 15:42:40 -0700 | [diff] [blame] | 3182 | sizeof(fakewriter_tasks[0]), |
| 3183 | GFP_KERNEL); |
| 3184 | if (fakewriter_tasks == NULL) { |
Li Zhijian | 81faa4f | 2021-11-03 16:30:28 +0800 | [diff] [blame] | 3185 | TOROUT_ERRSTRING("out of memory"); |
Paul E. McKenney | 4444d85 | 2015-05-14 15:42:40 -0700 | [diff] [blame] | 3186 | firsterr = -ENOMEM; |
| 3187 | goto unwind; |
| 3188 | } |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 3189 | } |
| 3190 | for (i = 0; i < nfakewriters; i++) { |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 3191 | firsterr = torture_create_kthread(rcu_torture_fakewriter, |
| 3192 | NULL, fakewriter_tasks[i]); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3193 | if (torture_init_error(firsterr)) |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 3194 | goto unwind; |
Josh Triplett | b772e1d | 2006-10-04 02:17:13 -0700 | [diff] [blame] | 3195 | } |
Paul E. McKenney | 68a675d | 2017-12-01 14:26:56 -0800 | [diff] [blame] | 3196 | reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3197 | GFP_KERNEL); |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 3198 | rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk), |
| 3199 | GFP_KERNEL); |
| 3200 | if (!reader_tasks || !rcu_torture_reader_mbchk) { |
Li Zhijian | 81faa4f | 2021-11-03 16:30:28 +0800 | [diff] [blame] | 3201 | TOROUT_ERRSTRING("out of memory"); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3202 | firsterr = -ENOMEM; |
| 3203 | goto unwind; |
| 3204 | } |
| 3205 | for (i = 0; i < nrealreaders; i++) { |
Paul E. McKenney | 0050453 | 2020-10-29 15:08:57 -0700 | [diff] [blame] | 3206 | rcu_torture_reader_mbchk[i].rtc_chkrdr = -1; |
Paul E. McKenney | c04dd09 | 2018-07-23 14:16:47 -0700 | [diff] [blame] | 3207 | firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 3208 | reader_tasks[i]); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3209 | if (torture_init_error(firsterr)) |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3210 | goto unwind; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3211 | } |
Paul E. McKenney | 2c4319b | 2020-09-23 17:39:46 -0700 | [diff] [blame] | 3212 | nrealnocbers = nocbs_nthreads; |
| 3213 | if (WARN_ON(nrealnocbers < 0)) |
| 3214 | nrealnocbers = 1; |
| 3215 | if (WARN_ON(nocbs_toggle < 0)) |
| 3216 | nocbs_toggle = HZ; |
| 3217 | if (nrealnocbers > 0) { |
| 3218 | nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL); |
| 3219 | if (nocb_tasks == NULL) { |
Li Zhijian | 81faa4f | 2021-11-03 16:30:28 +0800 | [diff] [blame] | 3220 | TOROUT_ERRSTRING("out of memory"); |
Paul E. McKenney | 2c4319b | 2020-09-23 17:39:46 -0700 | [diff] [blame] | 3221 | firsterr = -ENOMEM; |
| 3222 | goto unwind; |
| 3223 | } |
| 3224 | } else { |
| 3225 | nocb_tasks = NULL; |
| 3226 | } |
| 3227 | for (i = 0; i < nrealnocbers; i++) { |
| 3228 | firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3229 | if (torture_init_error(firsterr)) |
Paul E. McKenney | 2c4319b | 2020-09-23 17:39:46 -0700 | [diff] [blame] | 3230 | goto unwind; |
| 3231 | } |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3232 | if (stat_interval > 0) { |
Paul E. McKenney | 47cf29b | 2014-02-03 11:52:27 -0800 | [diff] [blame] | 3233 | firsterr = torture_create_kthread(rcu_torture_stats, NULL, |
| 3234 | stats_task); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3235 | if (torture_init_error(firsterr)) |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3236 | goto unwind; |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3237 | } |
Paul E. McKenney | e8e255f | 2015-05-14 16:55:45 -0700 | [diff] [blame] | 3238 | if (test_no_idle_hz && shuffle_interval > 0) { |
Paul E. McKenney | 3808dc9 | 2014-01-28 15:29:21 -0800 | [diff] [blame] | 3239 | firsterr = torture_shuffle_init(shuffle_interval * HZ); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3240 | if (torture_init_error(firsterr)) |
Rusty Russell | 73d0a4b | 2009-03-30 22:05:16 -0600 | [diff] [blame] | 3241 | goto unwind; |
Srivatsa Vaddagiri | d84f520 | 2006-01-08 01:03:42 -0800 | [diff] [blame] | 3242 | } |
Paul E. McKenney | d120f65 | 2008-06-18 05:21:44 -0700 | [diff] [blame] | 3243 | if (stutter < 0) |
| 3244 | stutter = 0; |
| 3245 | if (stutter) { |
Paul E. McKenney | ff3bf92 | 2019-04-09 14:44:49 -0700 | [diff] [blame] | 3246 | int t; |
| 3247 | |
| 3248 | t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ; |
| 3249 | firsterr = torture_stutter_init(stutter * HZ, t); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3250 | if (torture_init_error(firsterr)) |
Paul E. McKenney | d120f65 | 2008-06-18 05:21:44 -0700 | [diff] [blame] | 3251 | goto unwind; |
Paul E. McKenney | d120f65 | 2008-06-18 05:21:44 -0700 | [diff] [blame] | 3252 | } |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 3253 | if (fqs_duration < 0) |
| 3254 | fqs_duration = 0; |
| 3255 | if (fqs_duration) { |
Paul E. McKenney | 628edaa | 2014-01-31 11:57:43 -0800 | [diff] [blame] | 3256 | /* Create the fqs thread */ |
Paul E. McKenney | d0d0606 | 2014-03-17 20:56:45 -0700 | [diff] [blame] | 3257 | firsterr = torture_create_kthread(rcu_torture_fqs, NULL, |
| 3258 | fqs_task); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3259 | if (torture_init_error(firsterr)) |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 3260 | goto unwind; |
Paul E. McKenney | bf66f18 | 2010-01-04 15:09:10 -0800 | [diff] [blame] | 3261 | } |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 3262 | if (test_boost_interval < 1) |
| 3263 | test_boost_interval = 1; |
| 3264 | if (test_boost_duration < 2) |
| 3265 | test_boost_duration = 2; |
Joel Fernandes (Google) | 4babd85 | 2018-06-19 15:14:18 -0700 | [diff] [blame] | 3266 | if (rcu_torture_can_boost()) { |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 3267 | |
| 3268 | boost_starttime = jiffies + test_boost_interval * HZ; |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 3269 | |
| 3270 | firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE", |
| 3271 | rcutorture_booster_init, |
| 3272 | rcutorture_booster_cleanup); |
Sebastian Andrzej Siewior | 0ffd374 | 2016-08-18 14:57:22 +0200 | [diff] [blame] | 3273 | rcutor_hp = firsterr; |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3274 | if (torture_init_error(firsterr)) |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3275 | goto unwind; |
Paul E. McKenney | ea6d962 | 2021-03-30 16:30:32 -0700 | [diff] [blame] | 3276 | |
| 3277 | // Testing RCU priority boosting requires that rcutorture do |
| 3278 | // some serious abuse.  Counter this by running ksoftirqd at |
| 3279 | // a higher priority. |
| 3280 | if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) { |
| 3281 | for_each_online_cpu(cpu) { |
| 3282 | struct sched_param sp; |
| 3283 | struct task_struct *t; |
| 3284 | |
| 3285 | t = per_cpu(ksoftirqd, cpu); |
| 3286 | WARN_ON_ONCE(!t); |
| 3287 | sp.sched_priority = 2; |
| 3288 | sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); |
| 3289 | } |
| 3290 | } |
Paul E. McKenney | 8e8be45 | 2010-09-02 16:16:14 -0700 | [diff] [blame] | 3291 | } |
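/* If shutdown_secs was specified, schedule an automatic shutdown so that scripted runs terminate on their own. */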
Paul E. McKenney | 60013d5 | 2019-07-10 08:30:00 -0700 | [diff] [blame] | 3292 | shutdown_jiffies = jiffies + shutdown_secs * HZ; |
Paul E. McKenney | 01025eb | 2014-01-31 15:15:02 -0800 | [diff] [blame] | 3293 | firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3294 | if (torture_init_error(firsterr)) |
Paul E. McKenney | e991dbc | 2014-01-31 14:52:13 -0800 | [diff] [blame] | 3295 | goto unwind; |
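/* Set up CPU-hotplug torturing, which repeatedly offlines and onlines CPUs. */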
Paul E. McKenney | 3a6cb58 | 2018-12-10 09:44:52 -0800 | [diff] [blame] | 3296 | firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, |
| 3297 | rcutorture_sync); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3298 | if (torture_init_error(firsterr)) |
Paul E. McKenney | 37e377d | 2012-02-17 22:12:18 -0800 | [diff] [blame] | 3299 | goto unwind; |
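/* Set up testing of RCU CPU stall warnings. */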
Paul E. McKenney | 01025eb | 2014-01-31 15:15:02 -0800 | [diff] [blame] | 3300 | firsterr = rcu_torture_stall_init(); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3301 | if (torture_init_error(firsterr)) |
Paul E. McKenney | 37e377d | 2012-02-17 22:12:18 -0800 | [diff] [blame] | 3302 | goto unwind; |
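/* Set up grace-period forward-progress testing. */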
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 3303 | firsterr = rcu_torture_fwd_prog_init(); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3304 | if (torture_init_error(firsterr)) |
Paul E. McKenney | 1b27291 | 2018-07-18 14:32:31 -0700 | [diff] [blame] | 3305 | goto unwind; |
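/* Set up rcu_barrier() testing. */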
Paul E. McKenney | 01025eb | 2014-01-31 15:15:02 -0800 | [diff] [blame] | 3306 | firsterr = rcu_torture_barrier_init(); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3307 | if (torture_init_error(firsterr)) |
Paul E. McKenney | fae4b54 | 2012-02-20 17:51:45 -0800 | [diff] [blame] | 3308 | goto unwind; |
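/* Set up testing of RCU readers invoked from kthreads that promptly exit. */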
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 3309 | firsterr = rcu_torture_read_exit_init(); |
Paul E. McKenney | efeff6b | 2021-08-05 13:28:24 -0700 | [diff] [blame] | 3310 | if (torture_init_error(firsterr)) |
Paul E. McKenney | 4a5f133 | 2020-04-24 11:21:40 -0700 | [diff] [blame] | 3311 | goto unwind; |
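/* If requested, exercise debug-objects detection of duplicate call_rcu() invocations. */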
Paul E. McKenney | d2818df | 2013-04-23 17:05:42 -0700 | [diff] [blame] | 3312 | if (object_debug) |
| 3313 | rcu_test_debug_objects(); |
Paul E. McKenney | b5daa8f | 2014-01-30 13:38:09 -0800 | [diff] [blame] | 3314 | torture_init_end(); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3315 | return 0; |
| 3316 | |
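/* Error path: end the initialization phase, tear down anything already created, and power off if an automatic shutdown was requested so that scripted runs do not hang. */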
| 3317 | unwind: |
Paul E. McKenney | b5daa8f | 2014-01-30 13:38:09 -0800 | [diff] [blame] | 3318 | torture_init_end(); |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3319 | rcu_torture_cleanup(); |
Paul E. McKenney | 4994684 | 2020-09-18 13:30:33 -0700 | [diff] [blame] | 3320 | if (shutdown_secs) { |
| 3321 | WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); |
| 3322 | kernel_power_off(); |
| 3323 | } |
Paul E. McKenney | a241ec6 | 2005-10-30 15:03:12 -0800 | [diff] [blame] | 3324 | return firsterr; |
| 3325 | } |
| 3326 | |
| 3327 | module_init(rcu_torture_init); |
| 3328 | module_exit(rcu_torture_cleanup); |