Paul E. McKenney | 8bf05ed | 2019-01-17 10:09:19 -0800 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0+ |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 2 | /* |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 3 | * Read-Copy Update module-based scalability-test facility |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 4 | * |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 5 | * Copyright (C) IBM Corporation, 2015 |
| 6 | * |
Paul E. McKenney | 8bf05ed | 2019-01-17 10:09:19 -0800 | [diff] [blame] | 7 | * Authors: Paul E. McKenney <paulmck@linux.ibm.com> |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 8 | */ |
Paul E. McKenney | 6050003 | 2018-05-15 12:25:05 -0700 | [diff] [blame] | 9 | |
| 10 | #define pr_fmt(fmt) fmt |
| 11 | |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 12 | #include <linux/types.h> |
| 13 | #include <linux/kernel.h> |
| 14 | #include <linux/init.h> |
Joel Fernandes (Google) | 12af660 | 2019-12-19 11:22:42 -0500 | [diff] [blame] | 15 | #include <linux/mm.h> |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 16 | #include <linux/module.h> |
| 17 | #include <linux/kthread.h> |
| 18 | #include <linux/err.h> |
| 19 | #include <linux/spinlock.h> |
| 20 | #include <linux/smp.h> |
| 21 | #include <linux/rcupdate.h> |
| 22 | #include <linux/interrupt.h> |
| 23 | #include <linux/sched.h> |
Ingo Molnar | ae7e81c | 2017-02-01 18:07:51 +0100 | [diff] [blame] | 24 | #include <uapi/linux/sched/types.h> |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 25 | #include <linux/atomic.h> |
| 26 | #include <linux/bitops.h> |
| 27 | #include <linux/completion.h> |
| 28 | #include <linux/moduleparam.h> |
| 29 | #include <linux/percpu.h> |
| 30 | #include <linux/notifier.h> |
| 31 | #include <linux/reboot.h> |
| 32 | #include <linux/freezer.h> |
| 33 | #include <linux/cpu.h> |
| 34 | #include <linux/delay.h> |
| 35 | #include <linux/stat.h> |
| 36 | #include <linux/srcu.h> |
| 37 | #include <linux/slab.h> |
| 38 | #include <asm/byteorder.h> |
| 39 | #include <linux/torture.h> |
| 40 | #include <linux/vmalloc.h> |
Paul E. McKenney | 899f317 | 2020-09-09 12:27:03 -0700 | [diff] [blame] | 41 | #include <linux/rcupdate_trace.h> |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 42 | |
Paul E. McKenney | 25c3632 | 2017-05-03 09:51:55 -0700 | [diff] [blame] | 43 | #include "rcu.h" |
| 44 | |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 45 | MODULE_LICENSE("GPL"); |
Paul E. McKenney | 8bf05ed | 2019-01-17 10:09:19 -0800 | [diff] [blame] | 46 | MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>"); |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 47 | |
/* Tag appended to the scale_type in every console message from this module. */
#define SCALE_FLAG "-scale:"
/* Unconditional status message, prefixed with the RCU flavor under test. */
#define SCALEOUT_STRING(s) \
	pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s)
/* Status message emitted only when the "verbose" module parameter is set. */
#define VERBOSE_SCALEOUT_STRING(s) \
	do { if (verbose) pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s); } while (0)
/* Error message, marked "!!!" so test scripts can grep for failures. */
#define SCALEOUT_ERRSTRING(s) \
	pr_alert("%s" SCALE_FLAG "!!! %s\n", scale_type, s)
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 55 | |
/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1. Specify only the nr_cpus kernel boot parameter.  This will
 *    set both nreaders and nwriters to the value specified by
 *    nr_cpus for a mixed reader/writer test.
 *
 * 2. Specify the nr_cpus kernel boot parameter, but set
 *    rcuscale.nreaders to zero.  This will set nwriters to the
 *    value specified by nr_cpus for an update-only test.
 *
 * 3. Specify the nr_cpus kernel boot parameter, but set
 *    rcuscale.nwriters to zero.  This will set nreaders to the
 *    value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.
 *
 * Note that this test's readers are intended only as a test load for
 * the writers.  The reader scalability statistics will be overly
 * pessimistic due to the per-critical-section interrupt disabling,
 * test-end checks, and the pair of calls through pointers.
 */

/* Default to automatic shutdown only when built in; a module cannot halt. */
#ifdef MODULE
# define RCUSCALE_SHUTDOWN 0
#else
# define RCUSCALE_SHUTDOWN 1
#endif

/* Module parameters; -1 means "derive from the number of online CPUs". */
torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per reader");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, RCUSCALE_SHUTDOWN,
	      "Shutdown at end of scalability tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
torture_param(int, kfree_rcu_test, 0, "Do we run a kfree_rcu() scale test?");
torture_param(int, kfree_mult, 1, "Multiple of kfree_obj size to allocate.");
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 98 | |
/* Which rcu_scale_ops to use; selects rcu_ops, srcu_ops, tasks_ops, etc. */
static char *scale_type = "rcu";
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of RCU to scalability-test (rcu, srcu, ...)");

/* Actual thread counts after -1 defaults are resolved at init time. */
static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

/* Per-writer arrays of grace-period durations and their valid lengths. */
static u64 **writer_durations;
static int *writer_n_durations;
/* Startup/finish rendezvous counters shared by all reader/writer kthreads. */
static atomic_t n_rcu_scale_reader_started;
static atomic_t n_rcu_scale_writer_started;
static atomic_t n_rcu_scale_writer_finished;
static wait_queue_head_t shutdown_wq;
/* Timestamps (ns) and grace-period sequence snapshots bounding the test. */
static u64 t_rcu_scale_writer_started;
static u64 t_rcu_scale_writer_finished;
static unsigned long b_rcu_gp_test_started;
static unsigned long b_rcu_gp_test_finished;
/* Per-CPU count of not-yet-invoked async callbacks, bounded by gp_async_max. */
static DEFINE_PER_CPU(atomic_t, n_async_inflight);

/* Maximum and minimum number of grace-period measurements per writer. */
#define MAX_MEAS 10000
#define MIN_MEAS 100
| 123 | |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 124 | /* |
| 125 | * Operations vector for selecting different types of tests. |
| 126 | */ |
| 127 | |
struct rcu_scale_ops {
	int ptype;			/* Flavor identifier, e.g. RCU_FLAVOR. */
	void (*init)(void);		/* Per-flavor setup at test start. */
	void (*cleanup)(void);		/* Per-flavor teardown, may be NULL. */
	int (*readlock)(void);		/* Enter read-side CS, returns index. */
	void (*readunlock)(int idx);	/* Exit read-side CS for that index. */
	unsigned long (*get_gp_seq)(void);	/* Current GP sequence value. */
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	unsigned long (*exp_completed)(void);	/* Expedited-GP counter. */
	void (*async)(struct rcu_head *head, rcu_callback_t func);
	void (*gp_barrier)(void);	/* Wait for outstanding callbacks. */
	void (*sync)(void);		/* Synchronous grace-period wait. */
	void (*exp_sync)(void);		/* Expedited grace-period wait. */
	const char *name;		/* Matched against scale_type. */
};

/* Operations vector selected from scale_type at module init. */
static struct rcu_scale_ops *cur_ops;
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 145 | |
| 146 | /* |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 147 | * Definitions for rcu scalability testing. |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 148 | */ |
| 149 | |
/*
 * Enter an RCU read-side critical section.  RCU needs no per-reader
 * index, so return zero to satisfy the ->readlock() interface.
 */
static int rcu_scale_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}
| 155 | |
/* Exit an RCU read-side critical section; @idx is unused for plain RCU. */
static void rcu_scale_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}
| 160 | |
| 161 | static unsigned long __maybe_unused rcu_no_completed(void) |
| 162 | { |
| 163 | return 0; |
| 164 | } |
| 165 | |
/* ->init() stub for flavors needing no per-test setup. */
static void rcu_sync_scale_init(void)
{
}
| 169 | |
/* Operations for scalability-testing vanilla RCU (scale_type="rcu"). */
static struct rcu_scale_ops rcu_ops = {
	.ptype		= RCU_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= rcu_scale_read_lock,
	.readunlock	= rcu_scale_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= rcu_exp_batches_completed,
	.async		= call_rcu,
	.gp_barrier	= rcu_barrier,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.name		= "rcu"
};
| 184 | |
| 185 | /* |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 186 | * Definitions for srcu scalability testing. |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 187 | */ |
| 188 | |
/* Statically allocated srcu_struct for the "srcu" flavor; srcu_ctlp is */
/* redirected to the dynamically initialized &srcud by the "srcud" flavor. */
DEFINE_STATIC_SRCU(srcu_ctl_scale);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_scale;
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 191 | |
/* Enter an SRCU read-side critical section, returning the SRCU index. */
static int srcu_scale_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}
| 196 | |
/* Exit the SRCU read-side critical section matching @idx. */
static void srcu_scale_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}
| 201 | |
/* Report the number of SRCU batches completed on the current srcu_struct. */
static unsigned long srcu_scale_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}
| 206 | |
/* ->async() adapter: queue an SRCU callback on the current srcu_struct. */
static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}
| 211 | |
| 212 | static void srcu_rcu_barrier(void) |
| 213 | { |
| 214 | srcu_barrier(srcu_ctlp); |
| 215 | } |
| 216 | |
/* ->sync() adapter: wait for a normal SRCU grace period. */
static void srcu_scale_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}
| 221 | |
/* ->exp_sync() adapter: wait for an expedited SRCU grace period. */
static void srcu_scale_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}
| 226 | |
/* Operations for the statically allocated SRCU flavor (scale_type="srcu"). */
static struct rcu_scale_ops srcu_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= srcu_scale_read_lock,
	.readunlock	= srcu_scale_read_unlock,
	.get_gp_seq	= srcu_scale_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_scale_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_scale_synchronize,
	.exp_sync	= srcu_scale_synchronize_expedited,
	.name		= "srcu"
};
| 241 | |
/* Dynamically initialized srcu_struct used by the "srcud" flavor. */
static struct srcu_struct srcud;

/* ->init() for "srcud": point srcu_ctlp at srcud and initialize it. */
static void srcu_sync_scale_init(void)
{
	srcu_ctlp = &srcud;
	init_srcu_struct(srcu_ctlp);
}
| 249 | |
/* ->cleanup() for "srcud": release the dynamically initialized srcu_struct. */
static void srcu_sync_scale_cleanup(void)
{
	cleanup_srcu_struct(srcu_ctlp);
}
| 254 | |
/* Operations for dynamically allocated SRCU (scale_type="srcud"). */
static struct rcu_scale_ops srcud_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= srcu_sync_scale_init,
	.cleanup	= srcu_sync_scale_cleanup,
	.readlock	= srcu_scale_read_lock,
	.readunlock	= srcu_scale_read_unlock,
	.get_gp_seq	= srcu_scale_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_scale_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_scale_synchronize,
	.exp_sync	= srcu_scale_synchronize_expedited,
	.name		= "srcud"
};
| 270 | |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 271 | /* |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 272 | * Definitions for RCU-tasks scalability testing. |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 273 | */ |
| 274 | |
/*
 * RCU-tasks has no explicit read-side markers, so ->readlock() is a
 * no-op that returns a dummy index of zero.
 */
static int tasks_scale_read_lock(void)
{
	return 0;
}
| 279 | |
/* No-op ->readunlock() counterpart for RCU-tasks; @idx is ignored. */
static void tasks_scale_read_unlock(int idx)
{
}
| 283 | |
/* Operations for RCU-tasks (scale_type="tasks"); no expedited variant, */
/* so .exp_sync falls back to the normal synchronize_rcu_tasks(). */
static struct rcu_scale_ops tasks_ops = {
	.ptype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_scale_init,
	.readlock	= tasks_scale_read_lock,
	.readunlock	= tasks_scale_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.gp_diff	= rcu_seq_diff,
	.async		= call_rcu_tasks,
	.gp_barrier	= rcu_barrier_tasks,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.name		= "tasks"
};
| 297 | |
Paul E. McKenney | 899f317 | 2020-09-09 12:27:03 -0700 | [diff] [blame] | 298 | /* |
| 299 | * Definitions for RCU-tasks-trace scalability testing. |
| 300 | */ |
| 301 | |
| 302 | static int tasks_trace_scale_read_lock(void) |
| 303 | { |
| 304 | rcu_read_lock_trace(); |
| 305 | return 0; |
| 306 | } |
| 307 | |
| 308 | static void tasks_trace_scale_read_unlock(int idx) |
| 309 | { |
| 310 | rcu_read_unlock_trace(); |
| 311 | } |
| 312 | |
| 313 | static struct rcu_scale_ops tasks_tracing_ops = { |
| 314 | .ptype = RCU_TASKS_FLAVOR, |
| 315 | .init = rcu_sync_scale_init, |
| 316 | .readlock = tasks_trace_scale_read_lock, |
| 317 | .readunlock = tasks_trace_scale_read_unlock, |
| 318 | .get_gp_seq = rcu_no_completed, |
| 319 | .gp_diff = rcu_seq_diff, |
| 320 | .async = call_rcu_tasks_trace, |
| 321 | .gp_barrier = rcu_barrier_tasks_trace, |
| 322 | .sync = synchronize_rcu_tasks_trace, |
| 323 | .exp_sync = synchronize_rcu_tasks_trace, |
| 324 | .name = "tasks-tracing" |
| 325 | }; |
| 326 | |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 327 | static unsigned long rcuscale_seq_diff(unsigned long new, unsigned long old) |
Paul E. McKenney | d7219312 | 2018-05-15 15:24:41 -0700 | [diff] [blame] | 328 | { |
| 329 | if (!cur_ops->gp_diff) |
| 330 | return new - old; |
| 331 | return cur_ops->gp_diff(new, old); |
| 332 | } |
| 333 | |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 334 | /* |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 335 | * If scalability tests complete, wait for shutdown to commence. |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 336 | */ |
/*
 * If scalability tests complete, wait for shutdown to commence.
 */
static void rcu_scale_wait_shutdown(void)
{
	/* Supply a quiescent state and a reschedule opportunity. */
	cond_resched_tasks_rcu_qs();
	/* Keep running normally until every writer has finished measuring. */
	if (atomic_read(&n_rcu_scale_writer_finished) < nrealwriters)
		return;
	/* All measurements done: idle here until torture framework stops us. */
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
}
| 345 | |
| 346 | /* |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 347 | * RCU scalability reader kthread. Repeatedly does empty RCU read-side |
| 348 | * critical section, minimizing update-side interference. However, the |
| 349 | * point of this test is not to evaluate reader scalability, but instead |
| 350 | * to serve as a test load for update-side scalability testing. |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 351 | */ |
| 352 | static int |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 353 | rcu_scale_reader(void *arg) |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 354 | { |
| 355 | unsigned long flags; |
| 356 | int idx; |
Paul E. McKenney | 6b558c4 | 2016-01-12 14:15:40 -0800 | [diff] [blame] | 357 | long me = (long)arg; |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 358 | |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 359 | VERBOSE_SCALEOUT_STRING("rcu_scale_reader task started"); |
Paul E. McKenney | 6b558c4 | 2016-01-12 14:15:40 -0800 | [diff] [blame] | 360 | set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids)); |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 361 | set_user_nice(current, MAX_NICE); |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 362 | atomic_inc(&n_rcu_scale_reader_started); |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 363 | |
| 364 | do { |
| 365 | local_irq_save(flags); |
| 366 | idx = cur_ops->readlock(); |
| 367 | cur_ops->readunlock(idx); |
| 368 | local_irq_restore(flags); |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 369 | rcu_scale_wait_shutdown(); |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 370 | } while (!torture_must_stop()); |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 371 | torture_kthread_stopping("rcu_scale_reader"); |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 372 | return 0; |
| 373 | } |
| 374 | |
| 375 | /* |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 376 | * Callback function for asynchronous grace periods from rcu_scale_writer(). |
Paul E. McKenney | 881ed59 | 2017-04-17 12:47:10 -0700 | [diff] [blame] | 377 | */ |
/*
 * Callback function for asynchronous grace periods from rcu_scale_writer().
 * Drops this CPU's in-flight count and frees the rcu_head allocated by
 * the writer.
 */
static void rcu_scale_async_cb(struct rcu_head *rhp)
{
	atomic_dec(this_cpu_ptr(&n_async_inflight));
	kfree(rhp);
}
| 383 | |
| 384 | /* |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 385 | * RCU scale writer kthread. Repeatedly does a grace period. |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 386 | */ |
| 387 | static int |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 388 | rcu_scale_writer(void *arg) |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 389 | { |
| 390 | int i = 0; |
| 391 | int i_max; |
| 392 | long me = (long)arg; |
Paul E. McKenney | 881ed59 | 2017-04-17 12:47:10 -0700 | [diff] [blame] | 393 | struct rcu_head *rhp = NULL; |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 394 | bool started = false, done = false, alldone = false; |
| 395 | u64 t; |
| 396 | u64 *wdp; |
| 397 | u64 *wdpp = writer_durations[me]; |
| 398 | |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 399 | VERBOSE_SCALEOUT_STRING("rcu_scale_writer task started"); |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 400 | WARN_ON(!wdpp); |
Paul E. McKenney | 6b558c4 | 2016-01-12 14:15:40 -0800 | [diff] [blame] | 401 | set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids)); |
Peter Zijlstra | b143339 | 2020-04-21 12:09:13 +0200 | [diff] [blame] | 402 | sched_set_fifo_low(current); |
Paul E. McKenney | df37e66 | 2016-01-30 20:56:38 -0800 | [diff] [blame] | 403 | |
| 404 | if (holdoff) |
| 405 | schedule_timeout_uninterruptible(holdoff * HZ); |
| 406 | |
Joel Fernandes (Google) | 77e9752 | 2019-07-04 00:34:30 -0400 | [diff] [blame] | 407 | /* |
| 408 | * Wait until rcu_end_inkernel_boot() is called for normal GP tests |
| 409 | * so that RCU is not always expedited for normal GP tests. |
| 410 | * The system_state test is approximate, but works well in practice. |
| 411 | */ |
| 412 | while (!gp_exp && system_state != SYSTEM_RUNNING) |
| 413 | schedule_timeout_uninterruptible(1); |
| 414 | |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 415 | t = ktime_get_mono_fast_ns(); |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 416 | if (atomic_inc_return(&n_rcu_scale_writer_started) >= nrealwriters) { |
| 417 | t_rcu_scale_writer_started = t; |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 418 | if (gp_exp) { |
Joel Fernandes (Google) | e6e78b0 | 2019-08-30 12:36:29 -0400 | [diff] [blame] | 419 | b_rcu_gp_test_started = |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 420 | cur_ops->exp_completed() / 2; |
| 421 | } else { |
Joel Fernandes (Google) | e6e78b0 | 2019-08-30 12:36:29 -0400 | [diff] [blame] | 422 | b_rcu_gp_test_started = cur_ops->get_gp_seq(); |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 423 | } |
| 424 | } |
| 425 | |
| 426 | do { |
Paul E. McKenney | 820687a | 2017-04-25 15:12:56 -0700 | [diff] [blame] | 427 | if (writer_holdoff) |
| 428 | udelay(writer_holdoff); |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 429 | wdp = &wdpp[i]; |
| 430 | *wdp = ktime_get_mono_fast_ns(); |
Paul E. McKenney | 881ed59 | 2017-04-17 12:47:10 -0700 | [diff] [blame] | 431 | if (gp_async) { |
| 432 | retry: |
| 433 | if (!rhp) |
| 434 | rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); |
| 435 | if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) { |
Paul E. McKenney | 881ed59 | 2017-04-17 12:47:10 -0700 | [diff] [blame] | 436 | atomic_inc(this_cpu_ptr(&n_async_inflight)); |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 437 | cur_ops->async(rhp, rcu_scale_async_cb); |
Paul E. McKenney | 881ed59 | 2017-04-17 12:47:10 -0700 | [diff] [blame] | 438 | rhp = NULL; |
| 439 | } else if (!kthread_should_stop()) { |
Paul E. McKenney | 881ed59 | 2017-04-17 12:47:10 -0700 | [diff] [blame] | 440 | cur_ops->gp_barrier(); |
| 441 | goto retry; |
| 442 | } else { |
| 443 | kfree(rhp); /* Because we are stopping. */ |
| 444 | } |
| 445 | } else if (gp_exp) { |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 446 | cur_ops->exp_sync(); |
| 447 | } else { |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 448 | cur_ops->sync(); |
| 449 | } |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 450 | t = ktime_get_mono_fast_ns(); |
| 451 | *wdp = t - *wdp; |
| 452 | i_max = i; |
| 453 | if (!started && |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 454 | atomic_read(&n_rcu_scale_writer_started) >= nrealwriters) |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 455 | started = true; |
| 456 | if (!done && i >= MIN_MEAS) { |
| 457 | done = true; |
Peter Zijlstra | b143339 | 2020-04-21 12:09:13 +0200 | [diff] [blame] | 458 | sched_set_normal(current, 0); |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 459 | pr_alert("%s%s rcu_scale_writer %ld has %d measurements\n", |
| 460 | scale_type, SCALE_FLAG, me, MIN_MEAS); |
| 461 | if (atomic_inc_return(&n_rcu_scale_writer_finished) >= |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 462 | nrealwriters) { |
Paul E. McKenney | 620316e | 2016-01-30 21:32:09 -0800 | [diff] [blame] | 463 | schedule_timeout_interruptible(10); |
Paul E. McKenney | ac2bb27 | 2016-01-29 14:58:17 -0800 | [diff] [blame] | 464 | rcu_ftrace_dump(DUMP_ALL); |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 465 | SCALEOUT_STRING("Test complete"); |
| 466 | t_rcu_scale_writer_finished = t; |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 467 | if (gp_exp) { |
Joel Fernandes (Google) | e6e78b0 | 2019-08-30 12:36:29 -0400 | [diff] [blame] | 468 | b_rcu_gp_test_finished = |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 469 | cur_ops->exp_completed() / 2; |
| 470 | } else { |
Joel Fernandes (Google) | e6e78b0 | 2019-08-30 12:36:29 -0400 | [diff] [blame] | 471 | b_rcu_gp_test_finished = |
Paul E. McKenney | 17ef2fe | 2018-04-27 11:39:34 -0700 | [diff] [blame] | 472 | cur_ops->get_gp_seq(); |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 473 | } |
Artem Savkov | e6fb1fc | 2016-02-07 13:31:39 +0100 | [diff] [blame] | 474 | if (shutdown) { |
| 475 | smp_mb(); /* Assign before wake. */ |
| 476 | wake_up(&shutdown_wq); |
| 477 | } |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 478 | } |
| 479 | } |
| 480 | if (done && !alldone && |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 481 | atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters) |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 482 | alldone = true; |
| 483 | if (started && !alldone && i < MAX_MEAS - 1) |
| 484 | i++; |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 485 | rcu_scale_wait_shutdown(); |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 486 | } while (!torture_must_stop()); |
Paul E. McKenney | 881ed59 | 2017-04-17 12:47:10 -0700 | [diff] [blame] | 487 | if (gp_async) { |
Paul E. McKenney | 881ed59 | 2017-04-17 12:47:10 -0700 | [diff] [blame] | 488 | cur_ops->gp_barrier(); |
| 489 | } |
Jiangong.Han | 811192c | 2021-06-22 18:37:08 +0800 | [diff] [blame] | 490 | writer_n_durations[me] = i_max + 1; |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 491 | torture_kthread_stopping("rcu_scale_writer"); |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 492 | return 0; |
| 493 | } |
| 494 | |
Paul E. McKenney | 9622179 | 2018-05-17 11:33:17 -0700 | [diff] [blame] | 495 | static void |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 496 | rcu_scale_print_module_parms(struct rcu_scale_ops *cur_ops, const char *tag) |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 497 | { |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 498 | pr_alert("%s" SCALE_FLAG |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 499 | "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n", |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 500 | scale_type, tag, nrealreaders, nrealwriters, verbose, shutdown); |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 501 | } |
| 502 | |
/*
 * Stop all reader and writer kthreads, print per-writer grace-period
 * statistics, and run any scale-type-specific cleanup.  Called from
 * rcu_scale_shutdown() and at module unload (module_exit).
 *
 * NOTE(review): with kfree_rcu_test=y, module unload also lands here, but
 * the kfree_scale_thread() kthreads are stopped only by
 * kfree_scale_cleanup(), which this function never calls -- verify the
 * unload path for that configuration.
 */
static void
rcu_scale_cleanup(void)
{
	int i;
	int j;
	int ngps = 0;	/* Sum of grace-period counts over all writers. */
	u64 *wdp;	/* Current writer-duration sample. */
	u64 *wdpp;	/* Per-writer array of duration samples. */

	/*
	 * Would like warning at start, but everything is expedited
	 * during the mid-boot phase, so have to wait till the end.
	 */
	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
		SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
	if (rcu_gp_is_normal() && gp_exp)
		SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
	if (gp_exp && gp_async)
		SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");

	/* Only one cleanup instance may proceed. */
	if (torture_cleanup_begin())
		return;
	/* cur_ops is NULL if init never matched scale_type; nothing to stop. */
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_scale_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}

	if (writer_tasks) {
		for (i = 0; i < nrealwriters; i++) {
			torture_stop_kthread(rcu_scale_writer,
					     writer_tasks[i]);
			if (!writer_n_durations)
				continue;
			j = writer_n_durations[i];
			pr_alert("%s%s writer %d gps: %d\n",
				 scale_type, SCALE_FLAG, i, j);
			ngps += j;
		}
		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
			 scale_type, SCALE_FLAG,
			 t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
			 t_rcu_scale_writer_finished -
			 t_rcu_scale_writer_started,
			 ngps,
			 rcuscale_seq_diff(b_rcu_gp_test_finished,
					   b_rcu_gp_test_started));
		/* Dump every recorded per-grace-period duration, per writer. */
		for (i = 0; i < nrealwriters; i++) {
			if (!writer_durations)
				break;
			if (!writer_n_durations)
				continue;
			wdpp = writer_durations[i];
			if (!wdpp)
				continue;
			for (j = 0; j < writer_n_durations[i]; j++) {
				wdp = &wdpp[j];
				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
					 scale_type, SCALE_FLAG,
					 i, j, *wdp);
				/* Yield occasionally so long dumps don't hog the CPU. */
				if (j % 100 == 0)
					schedule_timeout_uninterruptible(1);
			}
			kfree(writer_durations[i]);
		}
		kfree(writer_tasks);
		kfree(writer_durations);
		kfree(writer_n_durations);
	}

	/* Do torture-type-specific cleanup operations. */
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}
| 585 | |
| 586 | /* |
| 587 | * Return the number if non-negative. If -1, the number of CPUs. |
| 588 | * If less than -1, that much less than the number of CPUs, but |
| 589 | * at least one. |
| 590 | */ |
| 591 | static int compute_real(int n) |
| 592 | { |
| 593 | int nr; |
| 594 | |
| 595 | if (n >= 0) { |
| 596 | nr = n; |
| 597 | } else { |
| 598 | nr = num_online_cpus() + 1 + n; |
| 599 | if (nr <= 0) |
| 600 | nr = 1; |
| 601 | } |
| 602 | return nr; |
| 603 | } |
| 604 | |
| 605 | /* |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 606 | * RCU scalability shutdown kthread. Just waits to be awakened, then shuts |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 607 | * down system. |
| 608 | */ |
| 609 | static int |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 610 | rcu_scale_shutdown(void *arg) |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 611 | { |
Joel Fernandes (Google) | 7e86646 | 2020-05-25 00:36:47 -0400 | [diff] [blame] | 612 | wait_event(shutdown_wq, |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 613 | atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters); |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 614 | smp_mb(); /* Wake before output. */ |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 615 | rcu_scale_cleanup(); |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 616 | kernel_power_off(); |
| 617 | return -EINVAL; |
| 618 | } |
| 619 | |
Joel Fernandes (Google) | e6e78b0 | 2019-08-30 12:36:29 -0400 | [diff] [blame] | 620 | /* |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 621 | * kfree_rcu() scalability tests: Start a kfree_rcu() loop on all CPUs for number |
Joel Fernandes (Google) | e6e78b0 | 2019-08-30 12:36:29 -0400 | [diff] [blame] | 622 | * of iterations and measure total time and number of GP for all iterations to complete. |
| 623 | */ |
| 624 | |
| 625 | torture_param(int, kfree_nthreads, -1, "Number of threads running loops of kfree_rcu()."); |
| 626 | torture_param(int, kfree_alloc_num, 8000, "Number of allocations and frees done in an iteration."); |
| 627 | torture_param(int, kfree_loops, 10, "Number of loops doing kfree_alloc_num allocations and frees."); |
Uladzislau Rezki (Sony) | 686fe1b | 2021-02-17 19:51:10 +0100 | [diff] [blame] | 628 | torture_param(bool, kfree_rcu_test_double, false, "Do we run a kfree_rcu() double-argument scale test?"); |
| 629 | torture_param(bool, kfree_rcu_test_single, false, "Do we run a kfree_rcu() single-argument scale test?"); |
Joel Fernandes (Google) | e6e78b0 | 2019-08-30 12:36:29 -0400 | [diff] [blame] | 630 | |
| 631 | static struct task_struct **kfree_reader_tasks; |
| 632 | static int kfree_nrealthreads; |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 633 | static atomic_t n_kfree_scale_thread_started; |
| 634 | static atomic_t n_kfree_scale_thread_ended; |
Joel Fernandes (Google) | e6e78b0 | 2019-08-30 12:36:29 -0400 | [diff] [blame] | 635 | |
| 636 | struct kfree_obj { |
| 637 | char kfree_obj[8]; |
| 638 | struct rcu_head rh; |
| 639 | }; |
| 640 | |
/*
 * kfree_rcu() scalability kthread: repeatedly kmalloc() objects and free
 * them via kfree_rcu(), tracking elapsed time, grace-period counts, and
 * memory footprint.  The last thread to start records the starting
 * grace-period number; the last to finish records the ending one and
 * prints the summary.
 */
static int
kfree_scale_thread(void *arg)
{
	int i, loop = 0;
	long me = (long)arg;	/* Thread index, used for CPU affinity. */
	struct kfree_obj *alloc_ptr;
	u64 start_time, end_time;
	long long mem_begin, mem_during = 0;
	bool kfree_rcu_test_both;
	DEFINE_TORTURE_RANDOM(tr);

	VERBOSE_SCALEOUT_STRING("kfree_scale_thread task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	kfree_rcu_test_both = (kfree_rcu_test_single == kfree_rcu_test_double);

	start_time = ktime_get_mono_fast_ns();

	/* Last thread to start snapshots the starting grace-period number. */
	if (atomic_inc_return(&n_kfree_scale_thread_started) >= kfree_nrealthreads) {
		if (gp_exp)
			b_rcu_gp_test_started = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
	}

	do {
		/*
		 * Track available memory: baseline on first pass, then a
		 * running average sampled every kfree_loops/4 iterations.
		 * NOTE(review): "loop % (kfree_loops / 4)" divides by zero
		 * if the kfree_loops module parameter is set below 4 --
		 * verify parameter validation.
		 */
		if (!mem_during) {
			mem_during = mem_begin = si_mem_available();
		} else if (loop % (kfree_loops / 4) == 0) {
			mem_during = (mem_during + si_mem_available()) / 2;
		}

		for (i = 0; i < kfree_alloc_num; i++) {
			alloc_ptr = kmalloc(kfree_mult * sizeof(struct kfree_obj), GFP_KERNEL);
			/*
			 * NOTE(review): this early return bypasses
			 * torture_kthread_stopping() -- confirm the torture
			 * framework tolerates a thread exiting this way.
			 */
			if (!alloc_ptr)
				return -ENOMEM;

			// By default kfree_rcu_test_single and kfree_rcu_test_double are
			// initialized to false. If both have the same value (false or true)
			// both are randomly tested, otherwise only the one with value true
			// is tested.
			if ((kfree_rcu_test_single && !kfree_rcu_test_double) ||
			    (kfree_rcu_test_both && torture_random(&tr) & 0x800))
				kfree_rcu(alloc_ptr);
			else
				kfree_rcu(alloc_ptr, rh);
		}

		cond_resched();
	} while (!torture_must_stop() && ++loop < kfree_loops);

	/* Last thread to finish snapshots the end state and prints totals. */
	if (atomic_inc_return(&n_kfree_scale_thread_ended) >= kfree_nrealthreads) {
		end_time = ktime_get_mono_fast_ns();

		if (gp_exp)
			b_rcu_gp_test_finished = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_finished = cur_ops->get_gp_seq();

		pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n",
			 (unsigned long long)(end_time - start_time), kfree_loops,
			 rcuscale_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started),
			 (mem_begin - mem_during) >> (20 - PAGE_SHIFT));

		if (shutdown) {
			smp_mb(); /* Assign before wake. */
			wake_up(&shutdown_wq);
		}
	}

	torture_kthread_stopping("kfree_scale_thread");
	return 0;
}
| 714 | |
/*
 * Stop the kfree_rcu() scalability kthreads and free the task array.
 * Invoked from kfree_scale_shutdown() and from the kfree_scale_init()
 * unwind path.
 */
static void
kfree_scale_cleanup(void)
{
	int i;

	/* Only one cleanup instance may proceed. */
	if (torture_cleanup_begin())
		return;

	if (kfree_reader_tasks) {
		for (i = 0; i < kfree_nrealthreads; i++)
			torture_stop_kthread(kfree_scale_thread,
					     kfree_reader_tasks[i]);
		kfree(kfree_reader_tasks);
	}

	torture_cleanup_end();
}
| 732 | |
| 733 | /* |
| 734 | * shutdown kthread. Just waits to be awakened, then shuts down system. |
| 735 | */ |
| 736 | static int |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 737 | kfree_scale_shutdown(void *arg) |
Joel Fernandes (Google) | e6e78b0 | 2019-08-30 12:36:29 -0400 | [diff] [blame] | 738 | { |
Joel Fernandes (Google) | 7e86646 | 2020-05-25 00:36:47 -0400 | [diff] [blame] | 739 | wait_event(shutdown_wq, |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 740 | atomic_read(&n_kfree_scale_thread_ended) >= kfree_nrealthreads); |
Joel Fernandes (Google) | e6e78b0 | 2019-08-30 12:36:29 -0400 | [diff] [blame] | 741 | |
| 742 | smp_mb(); /* Wake before output. */ |
| 743 | |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 744 | kfree_scale_cleanup(); |
Joel Fernandes (Google) | e6e78b0 | 2019-08-30 12:36:29 -0400 | [diff] [blame] | 745 | kernel_power_off(); |
| 746 | return -EINVAL; |
| 747 | } |
| 748 | |
| 749 | static int __init |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 750 | kfree_scale_init(void) |
Joel Fernandes (Google) | e6e78b0 | 2019-08-30 12:36:29 -0400 | [diff] [blame] | 751 | { |
| 752 | long i; |
| 753 | int firsterr = 0; |
| 754 | |
| 755 | kfree_nrealthreads = compute_real(kfree_nthreads); |
| 756 | /* Start up the kthreads. */ |
| 757 | if (shutdown) { |
| 758 | init_waitqueue_head(&shutdown_wq); |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 759 | firsterr = torture_create_kthread(kfree_scale_shutdown, NULL, |
Joel Fernandes (Google) | e6e78b0 | 2019-08-30 12:36:29 -0400 | [diff] [blame] | 760 | shutdown_task); |
Paul E. McKenney | eb77abfd | 2021-08-05 15:58:53 -0700 | [diff] [blame] | 761 | if (torture_init_error(firsterr)) |
Joel Fernandes (Google) | e6e78b0 | 2019-08-30 12:36:29 -0400 | [diff] [blame] | 762 | goto unwind; |
| 763 | schedule_timeout_uninterruptible(1); |
| 764 | } |
| 765 | |
Kefeng Wang | b3e2d20 | 2020-04-17 12:02:45 +0800 | [diff] [blame] | 766 | pr_alert("kfree object size=%zu\n", kfree_mult * sizeof(struct kfree_obj)); |
Joel Fernandes (Google) | f87dc80 | 2020-03-16 12:32:26 -0400 | [diff] [blame] | 767 | |
Joel Fernandes (Google) | e6e78b0 | 2019-08-30 12:36:29 -0400 | [diff] [blame] | 768 | kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]), |
| 769 | GFP_KERNEL); |
| 770 | if (kfree_reader_tasks == NULL) { |
| 771 | firsterr = -ENOMEM; |
| 772 | goto unwind; |
| 773 | } |
| 774 | |
| 775 | for (i = 0; i < kfree_nrealthreads; i++) { |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 776 | firsterr = torture_create_kthread(kfree_scale_thread, (void *)i, |
Joel Fernandes (Google) | e6e78b0 | 2019-08-30 12:36:29 -0400 | [diff] [blame] | 777 | kfree_reader_tasks[i]); |
Paul E. McKenney | eb77abfd | 2021-08-05 15:58:53 -0700 | [diff] [blame] | 778 | if (torture_init_error(firsterr)) |
Joel Fernandes (Google) | e6e78b0 | 2019-08-30 12:36:29 -0400 | [diff] [blame] | 779 | goto unwind; |
| 780 | } |
| 781 | |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 782 | while (atomic_read(&n_kfree_scale_thread_started) < kfree_nrealthreads) |
Joel Fernandes (Google) | e6e78b0 | 2019-08-30 12:36:29 -0400 | [diff] [blame] | 783 | schedule_timeout_uninterruptible(1); |
| 784 | |
| 785 | torture_init_end(); |
| 786 | return 0; |
| 787 | |
| 788 | unwind: |
| 789 | torture_init_end(); |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 790 | kfree_scale_cleanup(); |
Joel Fernandes (Google) | e6e78b0 | 2019-08-30 12:36:29 -0400 | [diff] [blame] | 791 | return firsterr; |
| 792 | } |
| 793 | |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 794 | static int __init |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 795 | rcu_scale_init(void) |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 796 | { |
| 797 | long i; |
| 798 | int firsterr = 0; |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 799 | static struct rcu_scale_ops *scale_ops[] = { |
Paul E. McKenney | 899f317 | 2020-09-09 12:27:03 -0700 | [diff] [blame] | 800 | &rcu_ops, &srcu_ops, &srcud_ops, &tasks_ops, &tasks_tracing_ops |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 801 | }; |
| 802 | |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 803 | if (!torture_init_begin(scale_type, verbose)) |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 804 | return -EBUSY; |
| 805 | |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 806 | /* Process args and announce that the scalability'er is on the job. */ |
| 807 | for (i = 0; i < ARRAY_SIZE(scale_ops); i++) { |
| 808 | cur_ops = scale_ops[i]; |
| 809 | if (strcmp(scale_type, cur_ops->name) == 0) |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 810 | break; |
| 811 | } |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 812 | if (i == ARRAY_SIZE(scale_ops)) { |
| 813 | pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type); |
| 814 | pr_alert("rcu-scale types:"); |
| 815 | for (i = 0; i < ARRAY_SIZE(scale_ops); i++) |
| 816 | pr_cont(" %s", scale_ops[i]->name); |
Joe Perches | a753835 | 2018-05-14 13:27:33 -0700 | [diff] [blame] | 817 | pr_cont("\n"); |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 818 | firsterr = -EINVAL; |
Paul E. McKenney | ad092c0 | 2019-03-21 10:26:41 -0700 | [diff] [blame] | 819 | cur_ops = NULL; |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 820 | goto unwind; |
| 821 | } |
| 822 | if (cur_ops->init) |
| 823 | cur_ops->init(); |
| 824 | |
Joel Fernandes (Google) | e6e78b0 | 2019-08-30 12:36:29 -0400 | [diff] [blame] | 825 | if (kfree_rcu_test) |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 826 | return kfree_scale_init(); |
Joel Fernandes (Google) | e6e78b0 | 2019-08-30 12:36:29 -0400 | [diff] [blame] | 827 | |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 828 | nrealwriters = compute_real(nwriters); |
| 829 | nrealreaders = compute_real(nreaders); |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 830 | atomic_set(&n_rcu_scale_reader_started, 0); |
| 831 | atomic_set(&n_rcu_scale_writer_started, 0); |
| 832 | atomic_set(&n_rcu_scale_writer_finished, 0); |
| 833 | rcu_scale_print_module_parms(cur_ops, "Start of test"); |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 834 | |
| 835 | /* Start up the kthreads. */ |
| 836 | |
| 837 | if (shutdown) { |
| 838 | init_waitqueue_head(&shutdown_wq); |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 839 | firsterr = torture_create_kthread(rcu_scale_shutdown, NULL, |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 840 | shutdown_task); |
Paul E. McKenney | eb77abfd | 2021-08-05 15:58:53 -0700 | [diff] [blame] | 841 | if (torture_init_error(firsterr)) |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 842 | goto unwind; |
| 843 | schedule_timeout_uninterruptible(1); |
| 844 | } |
| 845 | reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), |
| 846 | GFP_KERNEL); |
| 847 | if (reader_tasks == NULL) { |
Li Zhijian | 86e7ed1 | 2021-10-29 17:40:28 +0800 | [diff] [blame] | 848 | SCALEOUT_ERRSTRING("out of memory"); |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 849 | firsterr = -ENOMEM; |
| 850 | goto unwind; |
| 851 | } |
| 852 | for (i = 0; i < nrealreaders; i++) { |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 853 | firsterr = torture_create_kthread(rcu_scale_reader, (void *)i, |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 854 | reader_tasks[i]); |
Paul E. McKenney | eb77abfd | 2021-08-05 15:58:53 -0700 | [diff] [blame] | 855 | if (torture_init_error(firsterr)) |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 856 | goto unwind; |
| 857 | } |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 858 | while (atomic_read(&n_rcu_scale_reader_started) < nrealreaders) |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 859 | schedule_timeout_uninterruptible(1); |
| 860 | writer_tasks = kcalloc(nrealwriters, sizeof(reader_tasks[0]), |
| 861 | GFP_KERNEL); |
| 862 | writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations), |
| 863 | GFP_KERNEL); |
| 864 | writer_n_durations = |
| 865 | kcalloc(nrealwriters, sizeof(*writer_n_durations), |
| 866 | GFP_KERNEL); |
| 867 | if (!writer_tasks || !writer_durations || !writer_n_durations) { |
Li Zhijian | 86e7ed1 | 2021-10-29 17:40:28 +0800 | [diff] [blame] | 868 | SCALEOUT_ERRSTRING("out of memory"); |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 869 | firsterr = -ENOMEM; |
| 870 | goto unwind; |
| 871 | } |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 872 | for (i = 0; i < nrealwriters; i++) { |
| 873 | writer_durations[i] = |
| 874 | kcalloc(MAX_MEAS, sizeof(*writer_durations[i]), |
| 875 | GFP_KERNEL); |
Wei Yongjun | 05dbbfe | 2016-06-13 15:20:39 +0000 | [diff] [blame] | 876 | if (!writer_durations[i]) { |
| 877 | firsterr = -ENOMEM; |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 878 | goto unwind; |
Wei Yongjun | 05dbbfe | 2016-06-13 15:20:39 +0000 | [diff] [blame] | 879 | } |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 880 | firsterr = torture_create_kthread(rcu_scale_writer, (void *)i, |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 881 | writer_tasks[i]); |
Paul E. McKenney | eb77abfd | 2021-08-05 15:58:53 -0700 | [diff] [blame] | 882 | if (torture_init_error(firsterr)) |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 883 | goto unwind; |
| 884 | } |
| 885 | torture_init_end(); |
| 886 | return 0; |
| 887 | |
| 888 | unwind: |
| 889 | torture_init_end(); |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 890 | rcu_scale_cleanup(); |
Paul E. McKenney | 2f2214d | 2020-09-17 10:30:46 -0700 | [diff] [blame] | 891 | if (shutdown) { |
| 892 | WARN_ON(!IS_MODULE(CONFIG_RCU_SCALE_TEST)); |
| 893 | kernel_power_off(); |
| 894 | } |
Paul E. McKenney | 8704baa | 2015-12-31 18:33:22 -0800 | [diff] [blame] | 895 | return firsterr; |
| 896 | } |
| 897 | |
Paul E. McKenney | 4e88ec4 | 2020-08-11 21:18:12 -0700 | [diff] [blame] | 898 | module_init(rcu_scale_init); |
| 899 | module_exit(rcu_scale_cleanup); |