/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}
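
/*
 * Note: any entry type that a tracer under test can emit must be
 * listed in trace_valid_entry() above; an unknown type is treated
 * as buffer corruption and aborts the self tests.
 */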

static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer is of size trace_buf_size; if
		 * we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
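
/*
 * Typical usage in the selftests below (a sketch of the pattern the
 * tracer tests repeat):
 *
 *	tracing_stop();
 *	ret = trace_test_buffer(&tr->trace_buffer, &count);
 *	trace->reset(tr);
 *	tracing_start();
 *
 * A zero return with a non-zero count means the buffer held only
 * valid entries.
 */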

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func			= trace_selftest_test_probe1_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func			= trace_selftest_test_probe2_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func			= trace_selftest_test_probe3_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_global = {
	.func			= trace_selftest_test_global_func,
	.flags			= FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

static int trace_selftest_ops(int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);
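
	/*
	 * The last ftrace_set_filter() argument is the "reset" flag:
	 * the calls with 1 replace any existing filter, while the
	 * second test_probe3 call passes 0 so that func2 is appended
	 * and probe 3 ends up tracing both functions.
	 */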

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	register_ftrace_function(&test_global);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (trace_selftest_test_global_cnt == 0)
		goto out;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

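	/*
	 * The counters are cumulative: none of them are reset between
	 * the trigger calls below, so the values checked after each
	 * DYN_FTRACE_TEST_NAME*() call grow accordingly.
	 */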
	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (trace_selftest_test_global_cnt == 0)
		goto out_free;
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	unregister_ftrace_function(&test_global);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(2);

	return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion protection. By
	 * calling this function again, we should recurse back into this
	 * function and count again. But this only happens if the arch
	 * supports all of the ftrace features and nothing else is using
	 * the function tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
	.func = trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
	.func = trace_selftest_test_recursion_safe_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
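
/*
 * Expected behavior (checked in the test below): for test_rec_probe
 * the ftrace core must suppress the recursion, so the callback runs
 * exactly once; for test_recsafe_probe the recursion is let through
 * once, so the callback runs exactly twice.
 */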

static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 1) {
		pr_cont("*callback not called once (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	if (pt_regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func		= trace_selftest_test_regs_func,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};
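
/*
 * With FTRACE_OPS_FL_SAVE_REGS set, registration is expected to fail
 * on arches without CONFIG_DYNAMIC_FTRACE_WITH_REGS; the test below
 * then falls back to FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED, which
 * registers fine but hands the callback a NULL pt_regs.
 */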

static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer selftest, from which
 * this one has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
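
	/*
	 * The 100us window with interrupts disabled above is the
	 * latency the irqsoff tracer should have recorded; it is
	 * checked below via the max buffer.
	 */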

	/*
	 * Stop the tracer to avoid a warning from a failed buffer flip;
	 * tracing_stop() disables the tr and max buffers, which makes
	 * flipping impossible if a parallel max irqs-off latency
	 * comes in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning from a failed buffer flip;
	 * tracing_stop() disables the tr and max buffers, which makes
	 * flipping impossible if a parallel max preempt-off latency
	 * comes in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning from a failed buffer flip;
	 * tracing_stop() disables the tr and max buffers, which makes
	 * flipping impossible if a parallel max irqs/preempt-off
	 * latency comes in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
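	/*
	 * The attr above asks for 100us of runtime every 10ms period,
	 * which should be plenty for the short wakeups below while a
	 * SCHED_DEADLINE task is still scheduled ahead of any
	 * non-deadline task in the system.
	 */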
	struct completion *x = data;

	sched_setattr(current, &attr);

	/* Make it known we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	complete(x);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This will likely be the system's top priority
		 * task; do short sleeps to let others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion is_ready;
	unsigned long count;
	int ret;

	init_completion(&is_ready);

	/* create a -deadline thread */
	p = kthread_run(trace_wakeup_test_thread, &is_ready, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at -deadline policy */
	wait_for_completion(&is_ready);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the -deadline thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&is_ready);

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&is_ready);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	printk("ret = %d\n", ret);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */