/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}
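
/*
 * Note: only the entry types listed above count as valid here; a tracer
 * under test that emits any other entry type will fail the buffer check
 * in trace_test_buffer_cpu() below.
 */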

static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}
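
/*
 * Usage sketch (not itself part of any test): the selftests below all
 * drive trace_test_buffer() the same way -- run the tracer for a while,
 * stop tracing, then verify the buffer and entry count:
 *
 *	tracing_stop();
 *	ret = trace_test_buffer(tr, &count);
 *	tracing_start();
 *	if (!ret && !count)
 *		ret = -1;	(no entries means the tracer recorded nothing)
 */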

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
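
/*
 * For reference, the init pattern every selftest below uses with the
 * helper above (a sketch mirroring the code that follows):
 *
 *	ret = tracer_init(trace, tr);
 *	if (ret)
 *		warn_failed_init_tracer(trace, ret);
 */
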
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func = trace_selftest_test_probe1_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func = trace_selftest_test_probe2_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func = trace_selftest_test_probe3_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_global = {
	.func = trace_selftest_test_global_func,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}
static int trace_selftest_ops(int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	register_ftrace_function(&test_global);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (trace_selftest_test_global_cnt == 0)
		goto out;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (trace_selftest_test_global_cnt == 0)
		goto out;
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	unregister_ftrace_function(&test_global);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
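
/*
 * A sketch of how the ops test above is driven (see
 * trace_selftest_startup_dynamic_tracing() below): it runs once with
 * the global tracer active and once with it off:
 *
 *	ret = trace_selftest_ops(1);		pass 1: global tracing running
 *	...
 *	if (!ret)
 *		ret = trace_selftest_ops(2);	pass 2: global tracing off
 */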

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in as a parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(2);

	return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	trace_selftest_recursion_cnt++;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion protection.
	 * By calling this function again, we should recurse back
	 * into this function and count again. But this only happens
	 * if the arch supports all ftrace features and nothing else
	 * is using the function tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
	.func = trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
	.func = trace_selftest_test_recursion_safe_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
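
/*
 * The two ops above differ only in FTRACE_OPS_FL_RECURSION_SAFE:
 * test_rec_probe relies on the ftrace core to block recursion around
 * its callback, while test_recsafe_probe declares that the callback
 * handles recursion itself (and so is expected to actually recurse
 * once when the arch supports all ftrace features).
 */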

static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	char *func_name;
	int len;
	int ret;
	int cnt;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 1) {
		pr_cont("*callback not called once (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	/*
	 * If the arch supports all ftrace features, and no other
	 * callback was on the list, we should be fine.
	 */
	if (!ftrace_nr_registered_ops() && !FTRACE_FORCE_LIST_FUNC)
		cnt = 2;	/* Should have recursed */
	else
		cnt = 1;

	ret = -1;
	if (trace_selftest_recursion_cnt != cnt) {
		pr_cont("*callback not called expected %d times (%d)* ",
			cnt, trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep 1/10 of a second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false, DUMP_ALL);
		return 0;
	}

	return trace_graph_entry(trace);
}
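
/*
 * Sketch of the watchdog's role: it is passed to register_ftrace_graph()
 * below in place of the plain trace_graph_entry() probe, so every traced
 * function entry bumps graph_hang_thresh; crossing GRAPH_MAX_FUNC_TEST
 * within the 100ms test window is treated as a hang and stops the tracer.
 */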

/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it known that we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}
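
/*
 * How the thread above is used by the wakeup selftest below: it is
 * created with kthread_run(), signals via the completion once it is
 * running at RT priority, then sleeps until wake_up_process() gives
 * the wakeup tracer a latency to measure, and is finally reaped with
 * kthread_stop().
 */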

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that already is awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case, is that we disable tracing. Honestly, if
	 * this race does happen something is horribly wrong with
	 * the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */