// SPDX-License-Identifier: GPL-2.0+
//
// Performance test comparing RCU vs other mechanisms
// for acquiring references on objects.
//
// Copyright (C) Google, 2020.
//
// Author: Joel Fernandes <joel@joelfernandes.org>

#define pr_fmt(fmt) fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/types.h>

#include "rcu.h"

#define PERF_FLAG "-ref-perf: "

#define PERFOUT(s, x...) \
	pr_alert("%s" PERF_FLAG s, perf_type, ## x)

#define VERBOSE_PERFOUT(s, x...) \
	do { if (verbose) pr_alert("%s" PERF_FLAG s, perf_type, ## x); } while (0)

#define VERBOSE_PERFOUT_ERRSTRING(s, x...) \
	do { if (verbose) pr_alert("%s" PERF_FLAG "!!! " s, perf_type, ## x); } while (0)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joel Fernandes (Google) <joel@joelfernandes.org>");

static char *perf_type = "rcu";
module_param(perf_type, charp, 0444);
MODULE_PARM_DESC(perf_type, "Type of test (rcu, srcu, refcnt, rwsem, rwlock).");

torture_param(int, verbose, 0, "Enable verbose debugging printk()s");

// Wait until there are multiple CPUs before starting test.
torture_param(int, holdoff, IS_BUILTIN(CONFIG_RCU_REF_PERF_TEST) ? 10 : 0,
	      "Holdoff time before test start (s)");
// Number of loops per experiment, all readers execute operations concurrently.
torture_param(long, loops, 10000000, "Number of loops per experiment.");
// Number of readers, with -1 defaulting to about 75% of the CPUs.
torture_param(int, nreaders, -1, "Number of readers, -1 for 75% of CPUs.");
// Number of runs.
torture_param(int, nruns, 30, "Number of experiments to run.");
// Reader delay in nanoseconds, 0 for no delay.
torture_param(int, readdelay, 0, "Read-side delay in nanoseconds.");

#ifdef MODULE
# define REFPERF_SHUTDOWN 0
#else
# define REFPERF_SHUTDOWN 1
#endif

torture_param(bool, shutdown, REFPERF_SHUTDOWN,
	      "Shutdown at end of performance tests.");
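
// Example invocation when built as a module (the module name "refperf"
// is an assumption based on this file's naming; adjust to the actual
// build):
//
//	modprobe refperf perf_type=srcu nreaders=8 loops=10000000 nruns=30
//
// Per-experiment reader durations and per-run averages are printed at
// the end of the test.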

struct reader_task {
	struct task_struct *task;
	int start_reader;
	wait_queue_head_t wq;
	u64 last_duration_ns;
};

static struct task_struct *shutdown_task;
static wait_queue_head_t shutdown_wq;

static struct task_struct *main_task;
static wait_queue_head_t main_wq;
static int shutdown_start;

static struct reader_task *reader_tasks;

// Number of readers that are part of the current experiment.
static atomic_t nreaders_exp;

// Used to wait for all threads to start.
static atomic_t n_init;
static atomic_t n_started;

// Track which experiment is currently running.
static int exp_idx;

// Operations vector for selecting different types of tests.
struct ref_perf_ops {
	void (*init)(void);
	void (*cleanup)(void);
	void (*readsection)(const int nloops);
	const char *name;
};
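
// Each mechanism under test supplies one of these operations vectors.
// The ->readsection() function is invoked from the reader's timed
// region with interrupts disabled, and must execute the requested
// number of empty acquire/release pairs.  (Note that the
// "i = nloops; i >= 0" loops below actually run nloops + 1 times.)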

static struct ref_perf_ops *cur_ops;

static void ref_rcu_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock();
		rcu_read_unlock();
	}
}

static void rcu_sync_perf_init(void)
{
}

static struct ref_perf_ops rcu_ops = {
	.init = rcu_sync_perf_init,
	.readsection = ref_rcu_read_section,
	.name = "rcu"
};

// Definitions for SRCU ref perf testing.
DEFINE_STATIC_SRCU(srcu_refctl_perf);
static struct srcu_struct *srcu_ctlp = &srcu_refctl_perf;

static void srcu_ref_perf_read_section(const int nloops)
{
	int i;
	int idx;

	for (i = nloops; i >= 0; i--) {
		idx = srcu_read_lock(srcu_ctlp);
		srcu_read_unlock(srcu_ctlp, idx);
	}
}

static struct ref_perf_ops srcu_ops = {
	.init = rcu_sync_perf_init,
	.readsection = srcu_ref_perf_read_section,
	.name = "srcu"
};

// Definitions for reference count.
static atomic_t refcnt;

static void ref_perf_refcnt_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		atomic_inc(&refcnt);
		atomic_dec(&refcnt);
	}
}

static struct ref_perf_ops refcnt_ops = {
	.init = rcu_sync_perf_init,
	.readsection = ref_perf_refcnt_section,
	.name = "refcnt"
};
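
// Note that all readers in the refcnt test share the single atomic_t
// above, so the measured overhead includes any cache-line contention
// among CPUs, unlike rcu_read_lock() and rcu_read_unlock(), which
// update only task-local or per-CPU state.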

// Definitions for rwlock
static rwlock_t test_rwlock;

static void ref_perf_rwlock_init(void)
{
	rwlock_init(&test_rwlock);
}

static void ref_perf_rwlock_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		read_lock(&test_rwlock);
		read_unlock(&test_rwlock);
	}
}

static struct ref_perf_ops rwlock_ops = {
	.init = ref_perf_rwlock_init,
	.readsection = ref_perf_rwlock_section,
	.name = "rwlock"
};

// Definitions for rwsem
static struct rw_semaphore test_rwsem;

static void ref_perf_rwsem_init(void)
{
	init_rwsem(&test_rwsem);
}

static void ref_perf_rwsem_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		down_read(&test_rwsem);
		up_read(&test_rwsem);
	}
}

static struct ref_perf_ops rwsem_ops = {
	.init = ref_perf_rwsem_init,
	.readsection = ref_perf_rwsem_section,
	.name = "rwsem"
};

// Reader kthread.  Repeatedly does empty read-side critical sections of
// the type selected by perf_type, minimizing update-side interference.
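// Protocol: main_func() sets ->start_reader and wakes ->wq; this reader
// then clears ->start_reader, runs the timed section with interrupts
// disabled, records the duration in ->last_duration_ns, and the last
// reader to finish an experiment wakes main_wq.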
static int
ref_perf_reader(void *arg)
{
	unsigned long flags;
	long me = (long)arg;
	struct reader_task *rt = &(reader_tasks[me]);
	u64 start;
	s64 duration;

	VERBOSE_PERFOUT("ref_perf_reader %ld: task started", me);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_init);
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);
repeat:
	VERBOSE_PERFOUT("ref_perf_reader %ld: waiting to start next experiment on cpu %d", me, smp_processor_id());

	// Wait for signal that this reader can start.
	wait_event(rt->wq, (atomic_read(&nreaders_exp) && smp_load_acquire(&rt->start_reader)) ||
		   torture_must_stop());

	if (torture_must_stop())
		goto end;

	// Make sure that the CPU is affinitized appropriately during testing.
	WARN_ON_ONCE(smp_processor_id() != me);

	WRITE_ONCE(rt->start_reader, 0);
	if (!atomic_dec_return(&n_started))
		while (atomic_read_acquire(&n_started))
			cpu_relax();

	VERBOSE_PERFOUT("ref_perf_reader %ld: experiment %d started", me, exp_idx);

	// To prevent noise, keep interrupts disabled.  This also avoids
	// entering the rcu_read_unlock() slow path.
	local_irq_save(flags);
	start = ktime_get_mono_fast_ns();

	cur_ops->readsection(loops);

	duration = ktime_get_mono_fast_ns() - start;
	local_irq_restore(flags);

	rt->last_duration_ns = WARN_ON_ONCE(duration < 0) ? 0 : duration;

	if (atomic_dec_and_test(&nreaders_exp))
		wake_up(&main_wq);

	VERBOSE_PERFOUT("ref_perf_reader %ld: experiment %d ended (readers remaining=%d)",
			me, exp_idx, atomic_read(&nreaders_exp));

	if (!torture_must_stop())
		goto repeat;
end:
	torture_kthread_stopping("ref_perf_reader");
	return 0;
}

static void reset_readers(void)
{
	int i;
	struct reader_task *rt;

	for (i = 0; i < nreaders; i++) {
		rt = &(reader_tasks[i]);

		rt->last_duration_ns = 0;
	}
}

// Print the results of each reader and return the sum of all their durations.
static u64 process_durations(int n)
{
	int i;
	struct reader_task *rt;
	char buf1[64];
	char *buf;
	u64 sum = 0;

	buf = kmalloc(128 + nreaders * 32, GFP_KERNEL);
	if (!buf)
		return 0;
	buf[0] = 0;
	sprintf(buf, "Experiment #%d (Format: <THREAD-NUM>:<Total loop time in ns>)",
		exp_idx);

	for (i = 0; i < n && !torture_must_stop(); i++) {
		rt = &(reader_tasks[i]);
		sprintf(buf1, "%d: %llu\t", i, rt->last_duration_ns);

		if (i % 5 == 0)
			strcat(buf, "\n");
		strcat(buf, buf1);

		sum += rt->last_duration_ns;
	}
	strcat(buf, "\n");

	PERFOUT("%s\n", buf);

	kfree(buf);
	return sum;
}

// main_func is the main orchestrator: it runs nruns experiments.  For
// each experiment, it directs all nreaders readers to start, waits for
// them to finish, collects their per-reader durations, and then starts
// the next experiment.  Once all experiments complete, it prints the
// per-run average duration of a single read-side operation.
static int main_func(void *arg)
{
	bool errexit = false;
	int exp, r;
	char buf1[64];
	char *buf;
	u64 *result_avg;

	set_cpus_allowed_ptr(current, cpumask_of(nreaders % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);

	VERBOSE_PERFOUT("main_func task started");
	result_avg = kzalloc(nruns * sizeof(*result_avg), GFP_KERNEL);
	buf = kzalloc(64 + nruns * 32, GFP_KERNEL);
	if (!result_avg || !buf) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		errexit = true;
	}
	atomic_inc(&n_init);

	// Wait for all threads to start.
	wait_event(main_wq, atomic_read(&n_init) == (nreaders + 1));
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);

	// Start all readers up for each experiment.
	for (exp = 0; exp < nruns && !torture_must_stop(); exp++) {
		if (errexit)
			break;
		if (torture_must_stop())
			goto end;

		reset_readers();
		atomic_set(&nreaders_exp, nreaders);
		atomic_set(&n_started, nreaders);

		exp_idx = exp;

		for (r = 0; r < nreaders; r++) {
			smp_store_release(&reader_tasks[r].start_reader, 1);
			wake_up(&reader_tasks[r].wq);
		}

		VERBOSE_PERFOUT("main_func: experiment started, waiting for %d readers",
				nreaders);

		wait_event(main_wq,
			   !atomic_read(&nreaders_exp) || torture_must_stop());

		VERBOSE_PERFOUT("main_func: experiment ended");

		if (torture_must_stop())
			goto end;

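		// Compute this run's average time per loop in thousandths of
		// a nanosecond; the extra factor of 1000 preserves three
		// decimal places for the integer print format used below.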
		result_avg[exp] = 1000 * process_durations(nreaders) / (nreaders * loops);
	}

	// Print the average of all experiments.
	PERFOUT("END OF TEST. Calculating average duration per loop (nanoseconds)...\n");

	buf[0] = 0;
	strcat(buf, "\n");
	strcat(buf, "Threads\tTime(ns)\n");

	for (exp = 0; exp < nruns; exp++) {
		if (errexit)
			break;
		sprintf(buf1, "%d\t%llu.%03d\n", exp + 1, result_avg[exp] / 1000, (int)(result_avg[exp] % 1000));
		strcat(buf, buf1);
	}

	if (!errexit)
		PERFOUT("%s", buf);

	// This will shut down everything, including us.
	if (shutdown) {
		shutdown_start = 1;
		wake_up(&shutdown_wq);
	}

	// Wait for torture to stop us.
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);

end:
	torture_kthread_stopping("main_func");
	kfree(result_avg);
	kfree(buf);
	return 0;
}

static void
ref_perf_print_module_parms(struct ref_perf_ops *cur_ops, const char *tag)
{
	pr_alert("%s" PERF_FLAG
		 "--- %s: verbose=%d shutdown=%d holdoff=%d loops=%ld nreaders=%d nruns=%d\n", perf_type, tag,
		 verbose, shutdown, holdoff, loops, nreaders, nruns);
}

static void
ref_perf_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nreaders; i++)
			torture_stop_kthread("ref_perf_reader",
					     reader_tasks[i].task);
	}
	kfree(reader_tasks);

	torture_stop_kthread("main_task", main_task);
	kfree(main_task);

	// Do perf-type-specific cleanup operations.
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

// Shutdown kthread.  Just waits to be awakened, then shuts down system.
static int
ref_perf_shutdown(void *arg)
{
	wait_event(shutdown_wq, shutdown_start);

	smp_mb(); // Wake before output.
	ref_perf_cleanup();
	kernel_power_off();

	return -EINVAL;
}

static int __init
ref_perf_init(void)
{
	long i;
	int firsterr = 0;
	static struct ref_perf_ops *perf_ops[] = {
		&rcu_ops, &srcu_ops, &refcnt_ops, &rwlock_ops, &rwsem_ops,
	};

	if (!torture_init_begin(perf_type, verbose))
		return -EBUSY;

	for (i = 0; i < ARRAY_SIZE(perf_ops); i++) {
		cur_ops = perf_ops[i];
		if (strcmp(perf_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(perf_ops)) {
		pr_alert("rcu-perf: invalid perf type: \"%s\"\n", perf_type);
		pr_alert("rcu-perf types:");
		for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
			pr_cont(" %s", perf_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_REF_PERF_TEST));
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	ref_perf_print_module_parms(cur_ops, "Start of test");

	// Shutdown task
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(ref_perf_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}

	// Reader tasks (default to ~75% of online CPUs).
	if (nreaders < 0)
		nreaders = (num_online_cpus() >> 1) + (num_online_cpus() >> 2);
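	// For example, 16 online CPUs gives (16 >> 1) + (16 >> 2) = 12 readers.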
	reader_tasks = kcalloc(nreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (!reader_tasks) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}

	VERBOSE_PERFOUT("Starting %d reader threads\n", nreaders);

	for (i = 0; i < nreaders; i++) {
		firsterr = torture_create_kthread(ref_perf_reader, (void *)i,
						  reader_tasks[i].task);
		if (firsterr)
			goto unwind;

		init_waitqueue_head(&(reader_tasks[i].wq));
	}

	// Main task
	init_waitqueue_head(&main_wq);
	firsterr = torture_create_kthread(main_func, NULL, main_task);
	if (firsterr)
		goto unwind;
	schedule_timeout_uninterruptible(1);

	// Wait until all threads start
	while (atomic_read(&n_init) < nreaders + 1)
		schedule_timeout_uninterruptible(1);

	wake_up(&main_wq);

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	ref_perf_cleanup();
	return firsterr;
}

module_init(ref_perf_init);
module_exit(ref_perf_cleanup);