// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based performance-test facility
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

#define PERF_FLAG "-perf:"
#define PERFOUT_STRING(s) \
	pr_alert("%s" PERF_FLAG " %s\n", perf_type, s)
#define VERBOSE_PERFOUT_STRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG " %s\n", perf_type, s); } while (0)
#define VERBOSE_PERFOUT_ERRSTRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)

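/*
 * For example (illustrative), with the default perf_type of "rcu",
 * PERFOUT_STRING("Test complete") emits "rcu-perf: Test complete".
 */
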
/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1. Specify only the nr_cpus kernel boot parameter.  This will
 *    set both nreaders and nwriters to the value specified by
 *    nr_cpus for a mixed reader/writer test.
 *
 * 2. Specify the nr_cpus kernel boot parameter, but set
 *    rcuperf.nreaders to zero.  This will set nwriters to the
 *    value specified by nr_cpus for an update-only test.
 *
 * 3. Specify the nr_cpus kernel boot parameter, but set
 *    rcuperf.nwriters to zero.  This will set nreaders to the
 *    value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.
 */

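/*
 * For example (illustrative boot-parameter combinations for the
 * above use cases, assuming a 16-CPU system):
 *
 * 1. nr_cpus=16                       (16 readers and 16 writers)
 * 2. nr_cpus=16 rcuperf.nreaders=0    (16 writers, update-only)
 * 3. nr_cpus=16 rcuperf.nwriters=0    (16 readers, read-only)
 */
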
torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per reader");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, !IS_ENABLED(MODULE),
	      "Shutdown at end of performance tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");

static char *perf_type = "rcu";
module_param(perf_type, charp, 0444);
MODULE_PARM_DESC(perf_type, "Type of RCU to performance-test (rcu, srcu, ...)");

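/*
 * For example (illustrative), to performance-test SRCU with four
 * writers and no readers when this facility is built as a module:
 *
 *	modprobe rcuperf perf_type=srcu nreaders=0 nwriters=4
 */
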
static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_perf_reader_started;
static atomic_t n_rcu_perf_writer_started;
static atomic_t n_rcu_perf_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_perf_writer_started;
static u64 t_rcu_perf_writer_finished;
static unsigned long b_rcu_perf_writer_started;
static unsigned long b_rcu_perf_writer_finished;
static DEFINE_PER_CPU(atomic_t, n_async_inflight);

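/* Writer-kthread state, for debugging, set throughout rcu_perf_writer(). */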
static int rcu_perf_writer_state;
#define RTWS_INIT	0
#define RTWS_ASYNC	1
#define RTWS_BARRIER	2
#define RTWS_EXP_SYNC	3
#define RTWS_SYNC	4
#define RTWS_IDLE	5
#define RTWS_STOPPING	6

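/* Bounds on the number of grace-period measurements taken by each writer. */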
#define MAX_MEAS 10000
#define MIN_MEAS 100

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_perf_ops {
	int ptype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	unsigned long (*exp_completed)(void);
	void (*async)(struct rcu_head *head, rcu_callback_t func);
	void (*gp_barrier)(void);
	void (*sync)(void);
	void (*exp_sync)(void);
	const char *name;
};

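/*
 * All test paths dispatch through cur_ops, so that, for example
 * (illustrative), cur_ops->sync() invokes synchronize_rcu() when
 * perf_type is "rcu" but synchronize_srcu(&srcu_ctl_perf) when
 * perf_type is "srcu".
 */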
static struct rcu_perf_ops *cur_ops;

/*
 * Definitions for rcu perf testing.
 */

static int rcu_perf_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_perf_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
	return 0;
}

static void rcu_sync_perf_init(void)
{
}

static struct rcu_perf_ops rcu_ops = {
	.ptype = RCU_FLAVOR,
	.init = rcu_sync_perf_init,
	.readlock = rcu_perf_read_lock,
	.readunlock = rcu_perf_read_unlock,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.exp_completed = rcu_exp_batches_completed,
	.async = call_rcu,
	.gp_barrier = rcu_barrier,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.name = "rcu"
};

/*
 * Definitions for srcu perf testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_perf);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_perf;

static int srcu_perf_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_perf_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_perf_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_perf_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_perf_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_perf_ops srcu_ops = {
	.ptype = SRCU_FLAVOR,
	.init = rcu_sync_perf_init,
	.readlock = srcu_perf_read_lock,
	.readunlock = srcu_perf_read_unlock,
	.get_gp_seq = srcu_perf_completed,
	.gp_diff = rcu_seq_diff,
	.exp_completed = srcu_perf_completed,
	.async = srcu_call_rcu,
	.gp_barrier = srcu_rcu_barrier,
	.sync = srcu_perf_synchronize,
	.exp_sync = srcu_perf_synchronize_expedited,
	.name = "srcu"
};

static struct srcu_struct srcud;

static void srcu_sync_perf_init(void)
{
	srcu_ctlp = &srcud;
	init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_perf_cleanup(void)
{
	cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_perf_ops srcud_ops = {
	.ptype = SRCU_FLAVOR,
	.init = srcu_sync_perf_init,
	.cleanup = srcu_sync_perf_cleanup,
	.readlock = srcu_perf_read_lock,
	.readunlock = srcu_perf_read_unlock,
	.get_gp_seq = srcu_perf_completed,
	.gp_diff = rcu_seq_diff,
	.exp_completed = srcu_perf_completed,
	.async = srcu_call_rcu,
	.gp_barrier = srcu_rcu_barrier,
	.sync = srcu_perf_synchronize,
	.exp_sync = srcu_perf_synchronize_expedited,
	.name = "srcud"
};

/*
 * Definitions for RCU-tasks perf testing.
 */

static int tasks_perf_read_lock(void)
{
	return 0;
}

static void tasks_perf_read_unlock(int idx)
{
}

static struct rcu_perf_ops tasks_ops = {
	.ptype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_perf_init,
	.readlock = tasks_perf_read_lock,
	.readunlock = tasks_perf_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.gp_diff = rcu_seq_diff,
	.async = call_rcu_tasks,
	.gp_barrier = rcu_barrier_tasks,
	.sync = synchronize_rcu_tasks,
	.exp_sync = synchronize_rcu_tasks,
	.name = "tasks"
};

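/*
 * Compute the grace-period sequence-number difference, using the
 * torture-type-specific function if one is provided.
 */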
static unsigned long rcuperf_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * If performance tests complete, wait for shutdown to commence.
 */
static void rcu_perf_wait_shutdown(void)
{
	cond_resched_tasks_rcu_qs();
	if (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters)
		return;
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
}

/*
 * RCU perf reader kthread.  Repeatedly enters and exits an empty RCU
 * read-side critical section, minimizing update-side interference.
 */
static int
rcu_perf_reader(void *arg)
{
	unsigned long flags;
	int idx;
	long me = (long)arg;

	VERBOSE_PERFOUT_STRING("rcu_perf_reader task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_rcu_perf_reader_started);

	do {
		local_irq_save(flags);
		idx = cur_ops->readlock();
		cur_ops->readunlock(idx);
		local_irq_restore(flags);
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_perf_reader");
	return 0;
}

/*
 * Callback function for asynchronous grace periods from rcu_perf_writer().
 */
static void rcu_perf_async_cb(struct rcu_head *rhp)
{
	atomic_dec(this_cpu_ptr(&n_async_inflight));
	kfree(rhp);
}

/*
 * RCU perf writer kthread.  Repeatedly initiates grace periods,
 * measuring the duration of each.
 */
static int
rcu_perf_writer(void *arg)
{
	int i = 0;
	int i_max;
	long me = (long)arg;
	struct rcu_head *rhp = NULL;
	struct sched_param sp;
	bool started = false, done = false, alldone = false;
	u64 t;
	u64 *wdp;
	u64 *wdpp = writer_durations[me];

	VERBOSE_PERFOUT_STRING("rcu_perf_writer task started");
	WARN_ON(!wdpp);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	sp.sched_priority = 1;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);

	if (holdoff)
		schedule_timeout_uninterruptible(holdoff * HZ);

	t = ktime_get_mono_fast_ns();
	if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
		t_rcu_perf_writer_started = t;
		if (gp_exp) {
			b_rcu_perf_writer_started =
				cur_ops->exp_completed() / 2;
		} else {
			b_rcu_perf_writer_started = cur_ops->get_gp_seq();
		}
	}

	do {
		if (writer_holdoff)
			udelay(writer_holdoff);
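		/*
		 * Record the grace-period start time; *wdp is overwritten
		 * below with the measured duration.
		 */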
		wdp = &wdpp[i];
		*wdp = ktime_get_mono_fast_ns();
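		/*
		 * With gp_async, post a callback unless too many are already
		 * in flight on this CPU, in which case wait for all
		 * outstanding callbacks via gp_barrier() and then retry.
		 */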
		if (gp_async) {
retry:
			if (!rhp)
				rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
			if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
				rcu_perf_writer_state = RTWS_ASYNC;
				atomic_inc(this_cpu_ptr(&n_async_inflight));
				cur_ops->async(rhp, rcu_perf_async_cb);
				rhp = NULL;
			} else if (!kthread_should_stop()) {
				rcu_perf_writer_state = RTWS_BARRIER;
				cur_ops->gp_barrier();
				goto retry;
			} else {
				kfree(rhp); /* Because we are stopping. */
			}
		} else if (gp_exp) {
			rcu_perf_writer_state = RTWS_EXP_SYNC;
			cur_ops->exp_sync();
		} else {
			rcu_perf_writer_state = RTWS_SYNC;
			cur_ops->sync();
		}
		rcu_perf_writer_state = RTWS_IDLE;
		t = ktime_get_mono_fast_ns();
		*wdp = t - *wdp;
		i_max = i;
		if (!started &&
		    atomic_read(&n_rcu_perf_writer_started) >= nrealwriters)
			started = true;
		if (!done && i >= MIN_MEAS) {
			done = true;
			sp.sched_priority = 0;
			sched_setscheduler_nocheck(current,
						   SCHED_NORMAL, &sp);
			pr_alert("%s%s rcu_perf_writer %ld has %d measurements\n",
				 perf_type, PERF_FLAG, me, MIN_MEAS);
			if (atomic_inc_return(&n_rcu_perf_writer_finished) >=
			    nrealwriters) {
				schedule_timeout_interruptible(10);
				rcu_ftrace_dump(DUMP_ALL);
				PERFOUT_STRING("Test complete");
				t_rcu_perf_writer_finished = t;
				if (gp_exp) {
					b_rcu_perf_writer_finished =
						cur_ops->exp_completed() / 2;
				} else {
					b_rcu_perf_writer_finished =
						cur_ops->get_gp_seq();
				}
				if (shutdown) {
					smp_mb(); /* Assign before wake. */
					wake_up(&shutdown_wq);
				}
			}
		}
		if (done && !alldone &&
		    atomic_read(&n_rcu_perf_writer_finished) >= nrealwriters)
			alldone = true;
		if (started && !alldone && i < MAX_MEAS - 1)
			i++;
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	if (gp_async) {
		rcu_perf_writer_state = RTWS_BARRIER;
		cur_ops->gp_barrier();
	}
	rcu_perf_writer_state = RTWS_STOPPING;
	writer_n_durations[me] = i_max;
	torture_kthread_stopping("rcu_perf_writer");
	return 0;
}

static void
rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
{
	pr_alert("%s" PERF_FLAG
		 "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
		 perf_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
}

static void
rcu_perf_cleanup(void)
{
	int i;
	int j;
	int ngps = 0;
	u64 *wdp;
	u64 *wdpp;

	/*
	 * Would like warning at start, but everything is expedited
	 * during the mid-boot phase, so have to wait till the end.
	 */
	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
		VERBOSE_PERFOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
	if (rcu_gp_is_normal() && gp_exp)
		VERBOSE_PERFOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
	if (gp_exp && gp_async)
		VERBOSE_PERFOUT_ERRSTRING("No expedited async GPs, so went with async!");

	if (torture_cleanup_begin())
		return;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_perf_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}

	if (writer_tasks) {
		for (i = 0; i < nrealwriters; i++) {
			torture_stop_kthread(rcu_perf_writer,
					     writer_tasks[i]);
			if (!writer_n_durations)
				continue;
			j = writer_n_durations[i];
			pr_alert("%s%s writer %d gps: %d\n",
				 perf_type, PERF_FLAG, i, j);
			ngps += j;
		}
		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
			 perf_type, PERF_FLAG,
			 t_rcu_perf_writer_started, t_rcu_perf_writer_finished,
			 t_rcu_perf_writer_finished -
			 t_rcu_perf_writer_started,
			 ngps,
			 rcuperf_seq_diff(b_rcu_perf_writer_finished,
					  b_rcu_perf_writer_started));
		for (i = 0; i < nrealwriters; i++) {
			if (!writer_durations)
				break;
			if (!writer_n_durations)
				continue;
			wdpp = writer_durations[i];
			if (!wdpp)
				continue;
			for (j = 0; j <= writer_n_durations[i]; j++) {
				wdp = &wdpp[j];
				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
					 perf_type, PERF_FLAG,
					 i, j, *wdp);
				if (j % 100 == 0)
					schedule_timeout_uninterruptible(1);
			}
			kfree(writer_durations[i]);
		}
		kfree(writer_tasks);
		kfree(writer_durations);
		kfree(writer_n_durations);
	}

	/* Do torture-type-specific cleanup operations. */
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

/*
 * Return the number if non-negative.  If -1, the number of CPUs.
 * If less than -1, the number of CPUs plus one plus that value
 * (so that -2 means one less than the number of CPUs), but at
 * least one.
 */
static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}
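
/*
 * For example (illustrative), with eight CPUs online:
 * compute_real(4) == 4, compute_real(-1) == 8, and compute_real(-2) == 7.
 */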

/*
 * RCU perf shutdown kthread.  Just waits to be awakened, then shuts
 * down system.
 */
static int
rcu_perf_shutdown(void *arg)
{
	do {
		wait_event(shutdown_wq,
			   atomic_read(&n_rcu_perf_writer_finished) >=
			   nrealwriters);
	} while (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters);
	smp_mb(); /* Wake before output. */
	rcu_perf_cleanup();
	kernel_power_off();
	return -EINVAL;
}

static int __init
rcu_perf_init(void)
{
	long i;
	int firsterr = 0;
	static struct rcu_perf_ops *perf_ops[] = {
		&rcu_ops, &srcu_ops, &srcud_ops, &tasks_ops,
	};

	if (!torture_init_begin(perf_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the perf'er is on the job. */
	for (i = 0; i < ARRAY_SIZE(perf_ops); i++) {
		cur_ops = perf_ops[i];
		if (strcmp(perf_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(perf_ops)) {
		pr_alert("rcu-perf: invalid perf type: \"%s\"\n", perf_type);
		pr_alert("rcu-perf types:");
		for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
			pr_cont(" %s", perf_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_PERF_TEST));
		firsterr = -EINVAL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	nrealwriters = compute_real(nwriters);
	nrealreaders = compute_real(nreaders);
	atomic_set(&n_rcu_perf_reader_started, 0);
	atomic_set(&n_rcu_perf_writer_started, 0);
	atomic_set(&n_rcu_perf_writer_finished, 0);
	rcu_perf_print_module_parms(cur_ops, "Start of test");

	/* Start up the kthreads. */

	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(rcu_perf_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_perf_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	while (atomic_read(&n_rcu_perf_reader_started) < nrealreaders)
		schedule_timeout_uninterruptible(1);
	writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]),
			       GFP_KERNEL);
	writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
				   GFP_KERNEL);
	writer_n_durations =
		kcalloc(nrealwriters, sizeof(*writer_n_durations),
			GFP_KERNEL);
	if (!writer_tasks || !writer_durations || !writer_n_durations) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters; i++) {
		writer_durations[i] =
			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
				GFP_KERNEL);
		if (!writer_durations[i]) {
			firsterr = -ENOMEM;
			goto unwind;
		}
		firsterr = torture_create_kthread(rcu_perf_writer, (void *)i,
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_perf_cleanup();
	return firsterr;
}

module_init(rcu_perf_init);
module_exit(rcu_perf_cleanup);