blob: e49fbe901cfc64c60734d52a0a7c8db320606b00 [file] [log] [blame]
Steven Rostedt (Red Hat)81dc9f02014-05-29 22:49:07 -04001#include <linux/delay.h>
2#include <linux/module.h>
3#include <linux/kthread.h>
4#include <linux/trace_clock.h>
5
6#define CREATE_TRACE_POINTS
7#include "trace_benchmark.h"
8
/* Kthread that fires the benchmark tracepoint in a tight loop */
static struct task_struct *bm_event_thread;

/* String written by the tracepoint; carries the stats of the previous write */
static char bm_str[BENCHMARK_EVENT_STRLEN] = "START";

static u64 bm_total;	/* sum of all measured deltas (excluding the first) */
static u64 bm_totalsq;	/* sum of squares of the deltas */
static u64 bm_last;	/* most recent delta */
static u64 bm_max;	/* largest delta seen */
static u64 bm_min;	/* smallest non-zero delta seen */
static u64 bm_first;	/* cold-cache first delta; reported but never updated */
static u64 bm_cnt;	/* number of tracepoint writes, including the first */
static u64 bm_stddev;	/* last computed variance (std^2); frozen once bm_cnt > UINT_MAX */
static unsigned int bm_avg;	/* last computed average */
static unsigned int bm_std;	/* last computed standard deviation */

/* Set by early_initcall below; gates kthread creation in trace_benchmark_reg() */
static bool ok_to_run;
25
Steven Rostedt (Red Hat)81dc9f02014-05-29 22:49:07 -040026/*
27 * This gets called in a loop recording the time it took to write
28 * the tracepoint. What it writes is the time statistics of the last
29 * tracepoint write. As there is nothing to write the first time
30 * it simply writes "START". As the first write is cold cache and
31 * the rest is hot, we save off that time in bm_first and it is
32 * reported as "first", which is shown in the second write to the
33 * tracepoint. The "first" field is writen within the statics from
34 * then on but never changes.
35 */
static void trace_do_benchmark(void)
{
	u64 start;
	u64 stop;
	u64 delta;
	u64 stddev;
	u64 seed;
	u64 last_seed;
	unsigned int avg;
	unsigned int std = 0;

	/* Only run if the tracepoint is actually active */
	if (!trace_benchmark_event_enabled() || !tracing_is_on())
		return;

	/* Time a single tracepoint write with interrupts off to reduce noise */
	local_irq_disable();
	start = trace_clock_local();
	trace_benchmark_event(bm_str);
	stop = trace_clock_local();
	local_irq_enable();

	bm_cnt++;

	delta = stop - start;

	/*
	 * The first read is cold cached, keep it separate from the
	 * other calculations.
	 */
	if (bm_cnt == 1) {
		bm_first = delta;
		scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
			  "first=%llu [COLD CACHED]", bm_first);
		return;
	}

	bm_last = delta;

	if (delta > bm_max)
		bm_max = delta;
	if (!bm_min || delta < bm_min)
		bm_min = delta;

	/*
	 * When bm_cnt is greater than UINT_MAX, it breaks the statistics
	 * accounting. Freeze the statistics when that happens.
	 * We should have enough data for the avg and stddev anyway.
	 * Report the last frozen values (bm_avg/bm_std/bm_stddev), with
	 * "**" marking that the stats are no longer being updated.
	 */
	if (bm_cnt > UINT_MAX) {
		scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
		    "last=%llu first=%llu max=%llu min=%llu ** avg=%u std=%d std^2=%lld",
			  bm_last, bm_first, bm_max, bm_min, bm_avg, bm_std, bm_stddev);
		return;
	}

	bm_total += delta;
	bm_totalsq += delta * delta;


	if (bm_cnt > 1) {
		/*
		 * Apply Welford's method to calculate standard deviation:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 *
		 * NOTE(review): the formula above is the textbook
		 * sum-of-squares sample variance, not Welford's online
		 * update — the result is the same, the naming is not.
		 */
		stddev = (u64)bm_cnt * bm_totalsq - bm_total * bm_total;
		/* do_div() divides in place and returns the remainder */
		do_div(stddev, (u32)bm_cnt);
		do_div(stddev, (u32)bm_cnt - 1);
	} else
		stddev = 0;

	/* avg = bm_total / bm_cnt */
	delta = bm_total;
	do_div(delta, bm_cnt);
	avg = delta;

	if (stddev > 0) {
		int i = 0;
		/*
		 * stddev is the square of standard deviation but
		 * we want the actually number. Use the average
		 * as our seed to find the std.
		 *
		 * The next try is:
		 * x = (x + N/x) / 2
		 *
		 * Where N is the squared number to find the square
		 * root of.
		 */
		seed = avg;
		do {
			last_seed = seed;
			seed = stddev;
			if (!last_seed)
				break;
			do_div(seed, last_seed);
			seed += last_seed;
			do_div(seed, 2);
		} while (i++ < 10 && last_seed != seed);
		/* Newton's iteration above is capped at 10 rounds */

		std = seed;
	}

	scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
		  "last=%llu first=%llu max=%llu min=%llu avg=%u std=%d std^2=%lld",
		  bm_last, bm_first, bm_max, bm_min, avg, std, stddev);

	/* Save for the frozen report once bm_cnt passes UINT_MAX */
	bm_std = std;
	bm_avg = avg;
	bm_stddev = stddev;
}
145
static int benchmark_event_kthread(void *arg)
{
	/* Give the tracepoint a moment to become active before timing it */
	msleep(100);

	for (;;) {
		if (kthread_should_stop())
			break;

		trace_do_benchmark();

		/*
		 * Stay runnable the whole time, but yield the CPU
		 * to other tasks between measurements.
		 */
		cond_resched();
	}

	return 0;
}
164
165/*
166 * When the benchmark tracepoint is enabled, it calls this
167 * function and the thread that calls the tracepoint is created.
168 */
Steven Rostedt (Red Hat)8cf868a2016-11-28 13:03:21 -0500169int trace_benchmark_reg(void)
Steven Rostedt (Red Hat)81dc9f02014-05-29 22:49:07 -0400170{
Steven Rostedt (Red Hat)9c1f6bb2016-11-28 17:48:07 -0500171 if (!ok_to_run) {
Steven Rostedt (Red Hat)1dd349a2016-11-28 13:17:25 -0500172 pr_warning("trace benchmark cannot be started via kernel command line\n");
173 return -EBUSY;
174 }
175
Steven Rostedt (Red Hat)81dc9f02014-05-29 22:49:07 -0400176 bm_event_thread = kthread_run(benchmark_event_kthread,
177 NULL, "event_benchmark");
Wei Yongjun8f0994b2017-01-12 13:55:02 +0000178 if (IS_ERR(bm_event_thread)) {
Steven Rostedt (Red Hat)1dd349a2016-11-28 13:17:25 -0500179 pr_warning("trace benchmark failed to create kernel thread\n");
Wei Yongjun8f0994b2017-01-12 13:55:02 +0000180 return PTR_ERR(bm_event_thread);
Steven Rostedt (Red Hat)1dd349a2016-11-28 13:17:25 -0500181 }
182
Steven Rostedt (Red Hat)8cf868a2016-11-28 13:03:21 -0500183 return 0;
Steven Rostedt (Red Hat)81dc9f02014-05-29 22:49:07 -0400184}
185
186/*
187 * When the benchmark tracepoint is disabled, it calls this
188 * function and the thread that calls the tracepoint is deleted
189 * and all the numbers are reset.
190 */
191void trace_benchmark_unreg(void)
192{
193 if (!bm_event_thread)
194 return;
195
196 kthread_stop(bm_event_thread);
Steven Rostedt (Red Hat)1dd349a2016-11-28 13:17:25 -0500197 bm_event_thread = NULL;
Steven Rostedt (Red Hat)81dc9f02014-05-29 22:49:07 -0400198
199 strcpy(bm_str, "START");
200 bm_total = 0;
201 bm_totalsq = 0;
202 bm_last = 0;
203 bm_max = 0;
204 bm_min = 0;
205 bm_cnt = 0;
Steven Rostedt (Red Hat)34839f52014-06-05 23:34:02 -0400206 /* These don't need to be reset but reset them anyway */
Steven Rostedt (Red Hat)81dc9f02014-05-29 22:49:07 -0400207 bm_first = 0;
Steven Rostedt (Red Hat)34839f52014-06-05 23:34:02 -0400208 bm_std = 0;
209 bm_avg = 0;
210 bm_stddev = 0;
Steven Rostedt (Red Hat)81dc9f02014-05-29 22:49:07 -0400211}
Steven Rostedt (Red Hat)9c1f6bb2016-11-28 17:48:07 -0500212
/*
 * Mark that it is now safe to start the benchmark thread.  Until this
 * runs, trace_benchmark_reg() refuses with -EBUSY (e.g. when the
 * tracepoint is enabled from the kernel command line too early).
 */
static __init int ok_to_run_trace_benchmark(void)
{
	ok_to_run = true;

	return 0;
}

early_initcall(ok_to_run_trace_benchmark);