// SPDX-License-Identifier: GPL-2.0+
//
// Scalability test comparing RCU vs other mechanisms
// for acquiring references on objects.
//
// Copyright (C) Google, 2020.
//
// Author: Joel Fernandes <joel@joelfernandes.org>

#define pr_fmt(fmt) fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/types.h>

#include "rcu.h"

#define SCALE_FLAG "-ref-scale: "

#define SCALEOUT(s, x...) \
	pr_alert("%s" SCALE_FLAG s, scale_type, ## x)

#define VERBOSE_SCALEOUT(s, x...) \
	do { if (verbose) pr_alert("%s" SCALE_FLAG s, scale_type, ## x); } while (0)

static atomic_t verbose_batch_ctr;

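// When verbose_batched is set, emit only every verbose_batched-th verbose
// message, sleeping for a jiffy beforehand to throttle console output.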
#define VERBOSE_SCALEOUT_BATCH(s, x...)						\
do {										\
	if (verbose &&								\
	    (verbose_batched <= 0 ||						\
	     !(atomic_inc_return(&verbose_batch_ctr) % verbose_batched))) {	\
		schedule_timeout_uninterruptible(1);				\
		pr_alert("%s" SCALE_FLAG s, scale_type, ## x);			\
	}									\
} while (0)

#define VERBOSE_SCALEOUT_ERRSTRING(s, x...) \
	do { if (verbose) pr_alert("%s" SCALE_FLAG "!!! " s, scale_type, ## x); } while (0)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joel Fernandes (Google) <joel@joelfernandes.org>");

static char *scale_type = "rcu";
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of test (rcu, srcu, refcnt, rwsem, rwlock).");
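// Illustrative example: when this test is built as a module (typically
// named "refscale"), the type and other parameters can be set at load time:
//   modprobe refscale scale_type=srcu nreaders=4 nruns=10 loops=100000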

torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
torture_param(int, verbose_batched, 0, "Batch verbose debugging printk()s");

// Wait until there are multiple CPUs before starting test.
torture_param(int, holdoff, IS_BUILTIN(CONFIG_RCU_REF_SCALE_TEST) ? 10 : 0,
	      "Holdoff time before test start (s)");
// Number of loops per experiment, all readers execute operations concurrently.
torture_param(long, loops, 10000, "Number of loops per experiment.");
// Number of readers, with -1 defaulting to about 75% of the CPUs.
torture_param(int, nreaders, -1, "Number of readers, -1 for 75% of CPUs.");
// Number of runs.
torture_param(int, nruns, 30, "Number of experiments to run.");
// Reader delay in nanoseconds, 0 for no delay.
torture_param(int, readdelay, 0, "Read-side delay in nanoseconds.");

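// By default, shut the system down at the end of the test when built into
// the kernel, but not when built as a module.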
#ifdef MODULE
# define REFSCALE_SHUTDOWN 0
#else
# define REFSCALE_SHUTDOWN 1
#endif

torture_param(bool, shutdown, REFSCALE_SHUTDOWN,
	      "Shutdown at end of scalability tests.");

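// Per-reader state: the reader's kthread, the flag and waitqueue used to
// start it, and the duration it measured during the most recent experiment.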
struct reader_task {
	struct task_struct *task;
	int start_reader;
	wait_queue_head_t wq;
	u64 last_duration_ns;
};

static struct task_struct *shutdown_task;
static wait_queue_head_t shutdown_wq;

static struct task_struct *main_task;
static wait_queue_head_t main_wq;
static int shutdown_start;

static struct reader_task *reader_tasks;

// Number of readers that are part of the current experiment.
static atomic_t nreaders_exp;

// Used to wait for all threads to start.
static atomic_t n_init;
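// Used to synchronize each experiment's start, warmup, and cooldown phases.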
static atomic_t n_started;
static atomic_t n_warmedup;
static atomic_t n_cooleddown;

// Track which experiment is currently running.
static int exp_idx;

// Operations vector for selecting different types of tests.
struct ref_scale_ops {
	void (*init)(void);
	void (*cleanup)(void);
	void (*readsection)(const int nloops);
	void (*delaysection)(const int nloops, const int udl, const int ndl);
	const char *name;
};

static struct ref_scale_ops *cur_ops;

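// Apply the requested combination of microsecond (udl) and nanosecond (ndl)
// delays.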
static void un_delay(const int udl, const int ndl)
{
	if (udl)
		udelay(udl);
	if (ndl)
		ndelay(ndl);
}

static void ref_rcu_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock();
		rcu_read_unlock();
	}
}

static void ref_rcu_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock();
		un_delay(udl, ndl);
		rcu_read_unlock();
	}
}

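// Empty init for the mechanisms that need no per-test setup.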
static void rcu_sync_scale_init(void)
{
}

static struct ref_scale_ops rcu_ops = {
	.init		= rcu_sync_scale_init,
	.readsection	= ref_rcu_read_section,
	.delaysection	= ref_rcu_delay_section,
	.name		= "rcu"
};

// Definitions for SRCU ref scale testing.
DEFINE_STATIC_SRCU(srcu_refctl_scale);
static struct srcu_struct *srcu_ctlp = &srcu_refctl_scale;

static void srcu_ref_scale_read_section(const int nloops)
{
	int i;
	int idx;

	for (i = nloops; i >= 0; i--) {
		idx = srcu_read_lock(srcu_ctlp);
		srcu_read_unlock(srcu_ctlp, idx);
	}
}

static void srcu_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;
	int idx;

	for (i = nloops; i >= 0; i--) {
		idx = srcu_read_lock(srcu_ctlp);
		un_delay(udl, ndl);
		srcu_read_unlock(srcu_ctlp, idx);
	}
}

static struct ref_scale_ops srcu_ops = {
	.init		= rcu_sync_scale_init,
	.readsection	= srcu_ref_scale_read_section,
	.delaysection	= srcu_ref_scale_delay_section,
	.name		= "srcu"
};

// Definitions for RCU Tasks ref scale testing: Empty read markers.
// These definitions also work for RCU Rude readers.
static void rcu_tasks_ref_scale_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--)
		continue;
}

static void rcu_tasks_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--)
		un_delay(udl, ndl);
}

static struct ref_scale_ops rcu_tasks_ops = {
	.init		= rcu_sync_scale_init,
	.readsection	= rcu_tasks_ref_scale_read_section,
	.delaysection	= rcu_tasks_ref_scale_delay_section,
	.name		= "rcu-tasks"
};

// Definitions for RCU Tasks Trace ref scale testing.
static void rcu_trace_ref_scale_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock_trace();
		rcu_read_unlock_trace();
	}
}

static void rcu_trace_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock_trace();
		un_delay(udl, ndl);
		rcu_read_unlock_trace();
	}
}

static struct ref_scale_ops rcu_trace_ops = {
	.init		= rcu_sync_scale_init,
	.readsection	= rcu_trace_ref_scale_read_section,
	.delaysection	= rcu_trace_ref_scale_delay_section,
	.name		= "rcu-trace"
};

// Definitions for reference count
static atomic_t refcnt;

static void ref_refcnt_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		atomic_inc(&refcnt);
		atomic_dec(&refcnt);
	}
}

static void ref_refcnt_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		atomic_inc(&refcnt);
		un_delay(udl, ndl);
		atomic_dec(&refcnt);
	}
}

static struct ref_scale_ops refcnt_ops = {
	.init		= rcu_sync_scale_init,
	.readsection	= ref_refcnt_section,
	.delaysection	= ref_refcnt_delay_section,
	.name		= "refcnt"
};

// Definitions for rwlock
static rwlock_t test_rwlock;

static void ref_rwlock_init(void)
{
	rwlock_init(&test_rwlock);
}

static void ref_rwlock_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		read_lock(&test_rwlock);
		read_unlock(&test_rwlock);
	}
}

static void ref_rwlock_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		read_lock(&test_rwlock);
		un_delay(udl, ndl);
		read_unlock(&test_rwlock);
	}
}

static struct ref_scale_ops rwlock_ops = {
	.init		= ref_rwlock_init,
	.readsection	= ref_rwlock_section,
	.delaysection	= ref_rwlock_delay_section,
	.name		= "rwlock"
};

// Definitions for rwsem
static struct rw_semaphore test_rwsem;

static void ref_rwsem_init(void)
{
	init_rwsem(&test_rwsem);
}

static void ref_rwsem_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		down_read(&test_rwsem);
		up_read(&test_rwsem);
	}
}

static void ref_rwsem_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		down_read(&test_rwsem);
		un_delay(udl, ndl);
		up_read(&test_rwsem);
	}
}

static struct ref_scale_ops rwsem_ops = {
	.init		= ref_rwsem_init,
	.readsection	= ref_rwsem_section,
	.delaysection	= ref_rwsem_delay_section,
	.name		= "rwsem"
};

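// Do one read-side sequence using the current scale_type, splitting any
// requested readdelay into whole microseconds plus leftover nanoseconds.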
static void rcu_scale_one_reader(void)
{
	if (readdelay <= 0)
		cur_ops->readsection(loops);
	else
		cur_ops->delaysection(loops, readdelay / 1000, readdelay % 1000);
}

// Reader kthread.  Repeatedly does empty RCU read-side critical
// sections, minimizing update-side interference.
static int
ref_scale_reader(void *arg)
{
	unsigned long flags;
	long me = (long)arg;
	struct reader_task *rt = &(reader_tasks[me]);
	u64 start;
	s64 duration;

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: task started", me);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_init);
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);
repeat:
	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, smp_processor_id());

	// Wait for signal that this reader can start.
	wait_event(rt->wq, (atomic_read(&nreaders_exp) && smp_load_acquire(&rt->start_reader)) ||
		   torture_must_stop());

	if (torture_must_stop())
		goto end;

	// Make sure that the CPU is affinitized appropriately during testing.
	WARN_ON_ONCE(smp_processor_id() != me);

	WRITE_ONCE(rt->start_reader, 0);
	if (!atomic_dec_return(&n_started))
		while (atomic_read_acquire(&n_started))
			cpu_relax();

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d started", me, exp_idx);

	// To reduce noise, do an initial cache-warming invocation, check
	// in, and then keep warming until everyone has checked in.
	rcu_scale_one_reader();
	if (!atomic_dec_return(&n_warmedup))
		while (atomic_read_acquire(&n_warmedup))
			rcu_scale_one_reader();
	// Also keep interrupts disabled.  This also has the effect
	// of preventing entries into slow path for rcu_read_unlock().
	local_irq_save(flags);
	start = ktime_get_mono_fast_ns();

	rcu_scale_one_reader();

	duration = ktime_get_mono_fast_ns() - start;
	local_irq_restore(flags);

	rt->last_duration_ns = WARN_ON_ONCE(duration < 0) ? 0 : duration;
	// To reduce runtime-skew noise, do maintain-load invocations until
	// everyone is done.
	if (!atomic_dec_return(&n_cooleddown))
		while (atomic_read_acquire(&n_cooleddown))
			rcu_scale_one_reader();

	if (atomic_dec_and_test(&nreaders_exp))
		wake_up(&main_wq);

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d ended, (readers remaining=%d)",
			       me, exp_idx, atomic_read(&nreaders_exp));

	if (!torture_must_stop())
		goto repeat;
end:
	torture_kthread_stopping("ref_scale_reader");
	return 0;
}

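// Forget each reader's duration from the previous experiment.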
static void reset_readers(void)
{
	int i;
	struct reader_task *rt;

	for (i = 0; i < nreaders; i++) {
		rt = &(reader_tasks[i]);

		rt->last_duration_ns = 0;
	}
}

// Print the results of each reader and return the sum of all their durations.
static u64 process_durations(int n)
{
	int i;
	struct reader_task *rt;
	char buf1[64];
	char *buf;
	u64 sum = 0;

	buf = kmalloc(128 + nreaders * 32, GFP_KERNEL);
	if (!buf)
		return 0;
	buf[0] = 0;
	sprintf(buf, "Experiment #%d (Format: <THREAD-NUM>:<Total loop time in ns>)",
		exp_idx);

	for (i = 0; i < n && !torture_must_stop(); i++) {
		rt = &(reader_tasks[i]);
		sprintf(buf1, "%d: %llu\t", i, rt->last_duration_ns);

		if (i % 5 == 0)
			strcat(buf, "\n");
		strcat(buf, buf1);

		sum += rt->last_duration_ns;
	}
	strcat(buf, "\n");

	SCALEOUT("%s\n", buf);

	kfree(buf);
	return sum;
}

// main_func is the main orchestrator: it runs the requested number of
// experiments.  For each experiment, it signals all of the readers to
// start, waits for them to finish, and records the average duration per
// read-side loop before moving on to the next experiment.  The
// per-experiment averages are printed once all experiments are done.
static int main_func(void *arg)
{
	bool errexit = false;
	int exp, r;
	char buf1[64];
	char *buf;
	u64 *result_avg;

	set_cpus_allowed_ptr(current, cpumask_of(nreaders % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);

	VERBOSE_SCALEOUT("main_func task started");
	result_avg = kzalloc(nruns * sizeof(*result_avg), GFP_KERNEL);
	buf = kzalloc(64 + nruns * 32, GFP_KERNEL);
	if (!result_avg || !buf) {
		VERBOSE_SCALEOUT_ERRSTRING("out of memory");
		errexit = true;
	}
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);

	// Wait for all threads to start.
	atomic_inc(&n_init);
	while (atomic_read(&n_init) < nreaders + 1)
		schedule_timeout_uninterruptible(1);

	// Start exp readers up per experiment
	for (exp = 0; exp < nruns && !torture_must_stop(); exp++) {
		if (errexit)
			break;
		if (torture_must_stop())
			goto end;

		reset_readers();
		atomic_set(&nreaders_exp, nreaders);
		atomic_set(&n_started, nreaders);
		atomic_set(&n_warmedup, nreaders);
		atomic_set(&n_cooleddown, nreaders);

		exp_idx = exp;

		for (r = 0; r < nreaders; r++) {
			smp_store_release(&reader_tasks[r].start_reader, 1);
			wake_up(&reader_tasks[r].wq);
		}

		VERBOSE_SCALEOUT("main_func: experiment started, waiting for %d readers",
				 nreaders);

		wait_event(main_wq,
			   !atomic_read(&nreaders_exp) || torture_must_stop());

		VERBOSE_SCALEOUT("main_func: experiment ended");

		if (torture_must_stop())
			goto end;

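		// Store 1000 times the per-reader, per-loop average duration
		// (in nanoseconds) so that the printout below can show three
		// decimal places.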
		result_avg[exp] = div_u64(1000 * process_durations(nreaders), nreaders * loops);
	}

	// Print the average of all experiments
	SCALEOUT("END OF TEST. Calculating average duration per loop (nanoseconds)...\n");

	if (!errexit) {
		buf[0] = 0;
		strcat(buf, "\n");
		strcat(buf, "Runs\tTime(ns)\n");
	}

	for (exp = 0; exp < nruns; exp++) {
		u64 avg;
		u32 rem;

		if (errexit)
			break;
		avg = div_u64_rem(result_avg[exp], 1000, &rem);
		sprintf(buf1, "%d\t%llu.%03u\n", exp + 1, avg, rem);
		strcat(buf, buf1);
	}

	if (!errexit)
		SCALEOUT("%s", buf);

	// This will shut down everything, including us.
	if (shutdown) {
		shutdown_start = 1;
		wake_up(&shutdown_wq);
	}

	// Wait for torture to stop us
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);

end:
	torture_kthread_stopping("main_func");
	kfree(result_avg);
	kfree(buf);
	return 0;
}

static void
ref_scale_print_module_parms(struct ref_scale_ops *cur_ops, const char *tag)
{
	pr_alert("%s" SCALE_FLAG
		 "--- %s: verbose=%d shutdown=%d holdoff=%d loops=%ld nreaders=%d nruns=%d readdelay=%d\n", scale_type, tag,
		 verbose, shutdown, holdoff, loops, nreaders, nruns, readdelay);
}

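// Clean up after the scalability test: stop the reader and main kthreads,
// then run any scale-type-specific cleanup.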
static void
ref_scale_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nreaders; i++)
			torture_stop_kthread("ref_scale_reader",
					     reader_tasks[i].task);
	}
	kfree(reader_tasks);

	torture_stop_kthread("main_task", main_task);
	kfree(main_task);

	// Do scale-type-specific cleanup operations.
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

// Shutdown kthread.  Just waits to be awakened, then shuts down system.
static int
ref_scale_shutdown(void *arg)
{
	wait_event(shutdown_wq, shutdown_start);

	smp_mb(); // Wake before output.
	ref_scale_cleanup();
	kernel_power_off();

	return -EINVAL;
}

static int __init
ref_scale_init(void)
{
	long i;
	int firsterr = 0;
	static struct ref_scale_ops *scale_ops[] = {
		&rcu_ops, &srcu_ops, &rcu_trace_ops, &rcu_tasks_ops,
		&refcnt_ops, &rwlock_ops, &rwsem_ops,
	};

	if (!torture_init_begin(scale_type, verbose))
		return -EBUSY;

	for (i = 0; i < ARRAY_SIZE(scale_ops); i++) {
		cur_ops = scale_ops[i];
		if (strcmp(scale_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(scale_ops)) {
		pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type);
		pr_alert("rcu-scale types:");
		for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
			pr_cont(" %s", scale_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	ref_scale_print_module_parms(cur_ops, "Start of test");

	// Shutdown task
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(ref_scale_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}

	// Reader tasks (default to ~75% of online CPUs).
	if (nreaders < 0)
		nreaders = (num_online_cpus() >> 1) + (num_online_cpus() >> 2);
	if (WARN_ONCE(loops <= 0, "%s: loops = %ld, adjusted to 1\n", __func__, loops))
		loops = 1;
	if (WARN_ONCE(nreaders <= 0, "%s: nreaders = %d, adjusted to 1\n", __func__, nreaders))
		nreaders = 1;
	if (WARN_ONCE(nruns <= 0, "%s: nruns = %d, adjusted to 1\n", __func__, nruns))
		nruns = 1;
	reader_tasks = kcalloc(nreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (!reader_tasks) {
		VERBOSE_SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}

	VERBOSE_SCALEOUT("Starting %d reader threads\n", nreaders);

	for (i = 0; i < nreaders; i++) {
		firsterr = torture_create_kthread(ref_scale_reader, (void *)i,
						  reader_tasks[i].task);
		if (firsterr)
			goto unwind;

		init_waitqueue_head(&(reader_tasks[i].wq));
	}

	// Main Task
	init_waitqueue_head(&main_wq);
	firsterr = torture_create_kthread(main_func, NULL, main_task);
	if (firsterr)
		goto unwind;

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	ref_scale_cleanup();
	if (shutdown) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_REF_SCALE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(ref_scale_init);
module_exit(ref_scale_cleanup);