/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>

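/*
 * Entry types the selftests know how to validate. Anything else
 * found in the ring buffer during a selftest is treated as
 * corruption.
 */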
static inline int trace_valid_entry(struct trace_entry *entry)
{
        switch (entry->type) {
        case TRACE_FN:
        case TRACE_CTX:
        case TRACE_WAKE:
        case TRACE_STACK:
        case TRACE_PRINT:
        case TRACE_SPECIAL:
        case TRACE_BRANCH:
        case TRACE_GRAPH_ENT:
        case TRACE_GRAPH_RET:
        case TRACE_HW_BRANCHES:
                return 1;
        }
        return 0;
}

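/*
 * Consume every event on one CPU's ring buffer and check that each
 * entry has a known type. Looping longer than the buffer size, or
 * hitting an unknown entry type, counts as a corrupted buffer and
 * disables tracing altogether.
 */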
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
        struct ring_buffer_event *event;
        struct trace_entry *entry;
        unsigned int loops = 0;

        while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
                entry = ring_buffer_event_data(event);

                /*
                 * The ring buffer holds at most trace_buf_size
                 * entries; if we loop more times than that,
                 * something is wrong with the ring buffer.
                 */
                if (loops++ > trace_buf_size) {
                        printk(KERN_CONT ".. bad ring buffer ");
                        goto failed;
                }
                if (!trace_valid_entry(entry)) {
                        printk(KERN_CONT ".. invalid entry %d ",
                                entry->type);
                        goto failed;
                }
        }
        return 0;

 failed:
        /* disable tracing */
        tracing_disabled = 1;
        printk(KERN_CONT ".. corrupted trace buffer .. ");
        return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;

        /* Don't allow flipping of max traces now */
        local_irq_save(flags);
        __raw_spin_lock(&ftrace_max_lock);

        cnt = ring_buffer_entries(tr->buffer);

        /*
         * The trace_test_buffer_cpu runs a while loop to consume all data.
         * If the calling tracer is broken, and is constantly filling
         * the buffer, this will run forever, and hard lock the box.
         * We disable the ring buffer while we do this test to prevent
         * a hard lock up.
         */
        tracing_off();
        for_each_possible_cpu(cpu) {
                ret = trace_test_buffer_cpu(tr, cpu);
                if (ret)
                        break;
        }
        tracing_on();
        __raw_spin_unlock(&ftrace_max_lock);
        local_irq_restore(flags);

        if (count)
                *count = cnt;

        return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
               trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

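/*
 * The test below filters function tracing down to a single function,
 * DYN_FTRACE_TEST_NAME. While the test sleeps, the buffer must stay
 * empty (every other function is filtered out); after one explicit
 * call to the filtered function, exactly one entry must show up.
 */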
/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                                           struct trace_array *tr,
                                           int (*func)(void))
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        unsigned long count;
        char *func_name;
        int ret;

        /* The ftrace test PASSED */
        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        /* passed in by parameter to keep gcc from optimizing it away */
        func();

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

        /* filter only on our function */
        ftrace_set_filter(func_name, strlen(func_name), 1);

        /* enable tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* we should have nothing in the buffer */
        ret = trace_test_buffer(tr, &count);
        if (ret)
                goto out;

        if (count) {
                ret = -1;
                printk(KERN_CONT ".. filter did not filter .. ");
                goto out;
        }

        /* call our function again */
        func();

        /* sleep again */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        /* we should only have one item */
        if (!ret && count != 1) {
                printk(KERN_CONT ".. filter failed count=%ld ..", count);
                ret = -1;
                goto out;
        }

 out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* Enable tracing on all functions again */
        ftrace_set_filter(NULL, 0, 1);

        return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        unsigned long count;
        int ret;

        /* make sure msleep has been recorded */
        msleep(1);

        /* start the tracing */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                                                     DYN_FTRACE_TEST_NAME);

 out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* kill ftrace totally if we failed */
        if (ret)
                ftrace_kill();

        return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
                                      struct trace_array *tr)
{
        int ret;
        unsigned long count;

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);

        tracing_stop();

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);

        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* Don't test dynamic tracing, the function tracer already did */

out:
        /* Stop it if we failed */
        if (ret)
                ftrace_graph_stop();

        return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
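/*
 * Verify the irqsoff latency tracer: disable interrupts around a
 * short udelay() and check that the max latency snapshot (max_tr)
 * recorded entries for the critical section.
 */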
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable interrupts for a bit */
        local_irq_disable();
        udelay(100);
        local_irq_enable();
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
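/*
 * Same idea as the irqsoff test above, but with preemption disabled
 * around the delay instead of interrupts.
 */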
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable preemption for a bit */
        preempt_disable();
        udelay(100);
        preempt_enable();
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
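/*
 * Combined test: disable preemption and interrupts around the delay,
 * re-enabling preemption before interrupts so that the irqs-off
 * region outlasts the preempt-off one. The sequence runs twice,
 * resetting the max latency in between.
 */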
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* disable preemption and interrupts for a bit */
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret) {
                tracing_start();
                goto out;
        }

        ret = trace_test_buffer(&max_tr, &count);
        if (ret) {
                tracing_start();
                goto out;
        }

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                tracing_start();
                goto out;
        }

        /* run the same test a second time with a fresh max latency */
        tracing_max_latency = 0;
        tracing_start();
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&max_tr, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

 out:
        trace->reset(tr);
        tracing_start();
        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
        /* What could possibly go wrong? */
        return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
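/*
 * Helper thread for the wakeup latency test: it boosts itself to an
 * RT priority, signals the tester through the completion, then goes
 * to sleep so the tester can wake it and measure the wakeup latency.
 */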
static int trace_wakeup_test_thread(void *data)
{
        /* Make this an RT thread; it doesn't need to be too high */
        struct sched_param param = { .sched_priority = 5 };
        struct completion *x = data;

        sched_setscheduler(current, SCHED_FIFO, &param);

        /* Make it known we have a new prio */
        complete(x);

        /* now go to sleep and let the test wake us up */
        set_current_state(TASK_INTERRUPTIBLE);
        schedule();

        /* we are awake, now wait to disappear */
        while (!kthread_should_stop()) {
                /*
                 * This is an RT task, do short sleeps to let
                 * others run.
                 */
                msleep(100);
        }

        return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        struct task_struct *p;
        struct completion isrt;
        unsigned long count;
        int ret;

        init_completion(&isrt);

        /* create a high prio thread */
        p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
        if (IS_ERR(p)) {
                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
                return -1;
        }

        /* make sure the thread is running at an RT prio */
        wait_for_completion(&isrt);

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* sleep to let the RT thread sleep too */
        msleep(100);

        /*
         * Yes, this is slightly racy. It is possible that for some
         * strange reason the RT thread we created did not call
         * schedule for 100ms after doing the completion, and we do
         * a wakeup on a task that already is awake. But that is
         * extremely unlikely, and the worst thing that can happen
         * in such a case is that we disable tracing. Honestly, if
         * this race does happen, something is horribly wrong with
         * the system.
         */

        wake_up_process(p);

        /* give a little time to let the thread wake up */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);

        trace->reset(tr);
        tracing_start();

        tracing_max_latency = save_max;

        /* kill the thread */
        kthread_stop(p);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_BRANCH_TRACER */

#ifdef CONFIG_HW_BRANCH_TRACER
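/*
 * The hw-branch tracer collects its per-cpu hardware trace data from
 * its open() callback, so the test calls trace->open() on a dummy
 * iterator before stopping tracing and checking the buffer; a tracer
 * without an open() method cannot be tested.
 */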
int
trace_selftest_startup_hw_branches(struct tracer *trace,
                                   struct trace_array *tr)
{
        unsigned long count;
        int ret;
        struct trace_iterator iter;
        struct tracer tracer;

        if (!trace->open) {
                printk(KERN_CONT "missing open function...");
                return -1;
        }

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /*
         * The hw-branch tracer needs to collect the trace from the
         * various cpu trace buffers - before tracing is stopped.
         */
        memset(&iter, 0, sizeof(iter));
        memcpy(&tracer, trace, sizeof(tracer));

        iter.trace = &tracer;
        iter.tr = tr;
        iter.pos = -1;
        mutex_init(&iter.mutex);

        trace->open(&iter);

        mutex_destroy(&iter.mutex);

        tracing_stop();

        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT "no entries found..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_HW_BRANCH_TRACER */