/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>

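/*
 * Entry types the selftests know how to validate. Anything else found
 * in the ring buffer is reported as an invalid entry by
 * trace_test_buffer_cpu() below.
 */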
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
	case TRACE_HW_BRANCHES:
	case TRACE_KSYM:
		return 1;
	}
	return 0;
}

static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane. If @count is non-NULL, the number of
 * entries found in the buffer is returned through it.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

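/* Warn when a tracer's init() callback fails during a selftest */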
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void __ftrace_dump(bool disable_tracing);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer selftest, from which
 * this one has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
					struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning about a failed buffer
	 * flip: tracing_stop() disables the tr and max buffers, which
	 * makes flipping impossible if a max irqs-off latency is being
	 * recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning about a failed buffer
	 * flip: tracing_stop() disables the tr and max buffers, which
	 * makes flipping impossible if a max preempt-off latency is
	 * being recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning about a failed buffer
	 * flip: tracing_stop() disables the tr and max buffers, which
	 * makes flipping impossible if a max irqs/preempt-off latency
	 * is being recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

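	/*
	 * "out" restarts tracing after a stop; "out_no_start" is used when
	 * tracer_init() failed and tracing was never stopped in the first
	 * place, so there is nothing to restart.
	 */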
out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
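/*
 * Kernel thread used by the wakeup tracer selftest: it raises itself to
 * an RT priority, signals that it is ready via the completion, then sleeps
 * until the selftest wakes it up and finally asks it to stop.
 */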
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread; it doesn't need to be too high a priority */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the selftest know we are now running at the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);


	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */

#ifdef CONFIG_HW_BRANCH_TRACER
int
trace_selftest_startup_hw_branches(struct tracer *trace,
				   struct trace_array *tr)
{
	struct trace_iterator *iter;
	struct tracer tracer;
	unsigned long count;
	int ret;

	if (!trace->open) {
		printk(KERN_CONT "missing open function...");
		return -1;
	}

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/*
	 * The hw-branch tracer needs to collect the trace from the various
	 * cpu trace buffers - before tracing is stopped.
	 */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

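	/*
	 * Open the throw-away iterator on a local copy of the tracer,
	 * presumably so that whatever trace->open() does to it cannot
	 * affect the registered tracer itself.
	 */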
	memcpy(&tracer, trace, sizeof(tracer));

	iter->trace = &tracer;
	iter->tr = tr;
	iter->pos = -1;
	mutex_init(&iter->mutex);

	trace->open(iter);

	mutex_destroy(&iter->mutex);
	kfree(iter);

	tracing_stop();

	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT "no entries found..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_HW_BRANCH_TRACER */

#ifdef CONFIG_KSYM_TRACER
static int ksym_selftest_dummy;

int
trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	ksym_selftest_dummy = 0;
	/* Register the read-write tracing request */

	ret = process_new_ksym_entry("ksym_selftest_dummy",
				     HW_BREAKPOINT_R | HW_BREAKPOINT_W,
				     (unsigned long)(&ksym_selftest_dummy));

	if (ret < 0) {
		printk(KERN_CONT "ksym_trace read-write startup test failed\n");
		goto ret_path;
	}
	/*
	 * Perform a read and a write operation over the dummy variable
	 * to trigger the tracer.
	 */
	if (ksym_selftest_dummy == 0)
		ksym_selftest_dummy++;

	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/*
	 * One read and one write operation were performed on the dummy
	 * variable above, so we expect exactly two entries in the
	 * trace buffer.
	 */
	if (!ret && count != 2) {
		printk(KERN_CONT "Ksym tracer startup test failed");
		ret = -1;
	}

ret_path:
	return ret;
}
#endif /* CONFIG_KSYM_TRACER */
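
/*
 * Note: each selftest above is hooked up through the selftest callback
 * in its tracer's struct tracer definition and is run when the tracer
 * is registered. A rough sketch of how a tracer opts in (illustrative
 * only, not part of this file):
 *
 *	static struct tracer function_trace __read_mostly = {
 *		.name		= "function",
 *		.init		= function_trace_init,
 *		.reset		= function_trace_reset,
 *	#ifdef CONFIG_FTRACE_SELFTEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 */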