/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
	case TRACE_HW_BRANCHES:
		return 1;
	}
	return 0;
}

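/*
 * Consume every event on one cpu's ring buffer, checking that each
 * entry has a valid type. On corruption, disable tracing entirely and
 * return -1; return 0 if the buffer looks sane.
 */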
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

/* Test dynamic code modification and ftrace filters */
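/*
 * The flow: enable ftrace and call the target once (through a pointer,
 * so gcc cannot optimize the call away), filter on just that function,
 * init the tracer and check that the buffer stays empty while the
 * function is not called, then call it once more and check that
 * exactly one entry was recorded.
 */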
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply prepend a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Pretty much the same as the function tracer selftest, from which
 * this one has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
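/*
 * Verify the irqsoff tracer: open a latency window by disabling
 * interrupts for ~100us, then check both the live buffer and the
 * max-latency snapshot (max_tr) for entries.
 */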
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
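/*
 * Same scheme as the irqsoff test, but the latency window is opened by
 * disabling preemption instead of interrupts.
 */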
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
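/*
 * Combined test: open a latency window with both preemption and
 * interrupts disabled, twice, checking the max-latency snapshot
 * after each pass.
 */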
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
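/*
 * The wakeup test: create a SCHED_FIFO kthread, let it go to sleep,
 * wake it, and check that the tracer recorded the wakeup latency in
 * the max-latency snapshot (max_tr).
 */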
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we have our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that already is awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
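/*
 * The sched_switch test just runs the tracer across a 100ms sleep,
 * which is guaranteed to schedule, and checks that entries were
 * recorded.
 */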
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
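/*
 * The sysprof test uses the same start/sleep/stop/check pattern as the
 * sched_switch test above.
 */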
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
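/*
 * Same start/sleep/stop/check pattern again, this time for the branch
 * tracer, which records the outcome of likely()/unlikely() annotations.
 */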
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */

#ifdef CONFIG_HW_BRANCH_TRACER
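/*
 * The hw-branch tracer keeps per-cpu branch records, so this test
 * builds a dummy iterator and calls the tracer's open() callback to
 * collect those records into the trace buffer before checking it.
 */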
int
trace_selftest_startup_hw_branches(struct tracer *trace,
				   struct trace_array *tr)
{
	struct trace_iterator iter;
	struct tracer tracer;
	unsigned long count;
	int ret;

	if (!trace->open) {
		printk(KERN_CONT "missing open function...");
		return -1;
	}

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/*
	 * The hw-branch tracer needs to collect the trace from the
	 * various cpu trace buffers before tracing is stopped.
	 */
	memset(&iter, 0, sizeof(iter));
	memcpy(&tracer, trace, sizeof(tracer));

	iter.trace = &tracer;
	iter.tr = tr;
	iter.pos = -1;
	mutex_init(&iter.mutex);

	trace->open(&iter);

	mutex_destroy(&iter.mutex);

	tracing_stop();

	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT "no entries found..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_HW_BRANCH_TRACER */