/* Include in trace.c */

#include <uapi/linux/sched/types.h>
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
        switch (entry->type) {
        case TRACE_FN:
        case TRACE_CTX:
        case TRACE_WAKE:
        case TRACE_STACK:
        case TRACE_PRINT:
        case TRACE_BRANCH:
        case TRACE_GRAPH_ENT:
        case TRACE_GRAPH_RET:
                return 1;
        }
        return 0;
}

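/*
 * Consume every event queued on one CPU's ring buffer and check that
 * each one is an entry type this selftest knows how to produce.  An
 * unknown type, or a buffer that never runs dry, means the buffer has
 * been corrupted, so tracing is shut down entirely.
 */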
static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
{
        struct ring_buffer_event *event;
        struct trace_entry *entry;
        unsigned int loops = 0;

        while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
                entry = ring_buffer_event_data(event);

                /*
                 * The ring buffer holds at most trace_buf_size entries;
                 * if we loop more times than that, something is wrong
                 * with the ring buffer.
                 */
                if (loops++ > trace_buf_size) {
                        printk(KERN_CONT ".. bad ring buffer ");
                        goto failed;
                }
                if (!trace_valid_entry(entry)) {
                        printk(KERN_CONT ".. invalid entry %d ",
                               entry->type);
                        goto failed;
                }
        }
        return 0;

 failed:
        /* disable tracing */
        tracing_disabled = 1;
        printk(KERN_CONT ".. corrupted trace buffer .. ");
        return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;

        /* Don't allow flipping of max traces now */
        local_irq_save(flags);
        arch_spin_lock(&buf->tr->max_lock);

        cnt = ring_buffer_entries(buf->buffer);

        /*
         * trace_test_buffer_cpu() runs a while loop to consume all data.
         * If the calling tracer is broken, and is constantly filling
         * the buffer, this will run forever, and hard lock the box.
         * We disable the ring buffer while we do this test to prevent
         * a hard lock up.
         */
        tracing_off();
        for_each_possible_cpu(cpu) {
                ret = trace_test_buffer_cpu(buf, cpu);
                if (ret)
                        break;
        }
        tracing_on();
        arch_spin_unlock(&buf->tr->max_lock);
        local_irq_restore(flags);

        if (count)
                *count = cnt;

        return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
               trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

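/*
 * DYN_FTRACE_TEST_NAME() and DYN_FTRACE_TEST_NAME2() are small marker
 * functions defined out of line (away from this file, so the compiler
 * cannot inline them and they remain real, traceable symbols).  The
 * probes below filter on their names and count how many times each
 * probe fires when the markers are called.
 */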
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct pt_regs *pt_regs)
{
        trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct pt_regs *pt_regs)
{
        trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct pt_regs *pt_regs)
{
        trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct pt_regs *pt_regs)
{
        trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
                                         unsigned long pip,
                                         struct ftrace_ops *op,
                                         struct pt_regs *pt_regs)
{
        trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
        .func = trace_selftest_test_probe1_func,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
        .func = trace_selftest_test_probe2_func,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
        .func = trace_selftest_test_probe3_func,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static void print_counts(void)
{
        printk("(%d %d %d %d %d) ",
               trace_selftest_test_probe1_cnt,
               trace_selftest_test_probe2_cnt,
               trace_selftest_test_probe3_cnt,
               trace_selftest_test_global_cnt,
               trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
        trace_selftest_test_probe1_cnt = 0;
        trace_selftest_test_probe2_cnt = 0;
        trace_selftest_test_probe3_cnt = 0;
        trace_selftest_test_global_cnt = 0;
        trace_selftest_test_dyn_cnt = 0;
}

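/*
 * Exercise several ftrace_ops at once: probe 1 filters on the first
 * test function, probe 2 on the second, and probe 3 on both.  On the
 * second pass (cnt > 1) a per-array ops is registered as well, and a
 * dynamically allocated ops is added midway through.  After each call
 * to a test function, the counters must match exactly what the
 * registered filters predict.
 */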
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
        int save_ftrace_enabled = ftrace_enabled;
        struct ftrace_ops *dyn_ops;
        char *func1_name;
        char *func2_name;
        int len1;
        int len2;
        int ret = -1;

        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace ops #%d: ", cnt);

        ftrace_enabled = 1;
        reset_counts();

        /* Handle PPC64 '.' name */
        func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
        len1 = strlen(func1_name);
        len2 = strlen(func2_name);

        /*
         * Probe 1 will trace function 1.
         * Probe 2 will trace function 2.
         * Probe 3 will trace functions 1 and 2.
         */
        ftrace_set_filter(&test_probe1, func1_name, len1, 1);
        ftrace_set_filter(&test_probe2, func2_name, len2, 1);
        ftrace_set_filter(&test_probe3, func1_name, len1, 1);
        ftrace_set_filter(&test_probe3, func2_name, len2, 0);

        register_ftrace_function(&test_probe1);
        register_ftrace_function(&test_probe2);
        register_ftrace_function(&test_probe3);
        /* First time we are running with main function */
        if (cnt > 1) {
                ftrace_init_array_ops(tr, trace_selftest_test_global_func);
                register_ftrace_function(tr->ops);
        }

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 0)
                goto out;
        if (trace_selftest_test_probe3_cnt != 1)
                goto out;
        if (cnt > 1) {
                if (trace_selftest_test_global_cnt == 0)
                        goto out;
        }

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out;
        if (trace_selftest_test_probe3_cnt != 2)
                goto out;

        /* Add a dynamic probe */
        dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
        if (!dyn_ops) {
                printk("MEMORY ERROR ");
                goto out;
        }

        dyn_ops->func = trace_selftest_test_dyn_func;

        register_ftrace_function(dyn_ops);

        trace_selftest_test_global_cnt = 0;

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 3)
                goto out_free;
        if (cnt > 1) {
                if (trace_selftest_test_global_cnt == 0)
                        goto out_free;
        }
        if (trace_selftest_test_dyn_cnt == 0)
                goto out_free;

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 4)
                goto out_free;

        ret = 0;
 out_free:
        unregister_ftrace_function(dyn_ops);
        kfree(dyn_ops);

 out:
        /* Purposely unregister in the same order */
        unregister_ftrace_function(&test_probe1);
        unregister_ftrace_function(&test_probe2);
        unregister_ftrace_function(&test_probe3);
        if (cnt > 1)
                unregister_ftrace_function(tr->ops);
        ftrace_reset_array_ops(tr);

        /* Make sure everything is off */
        reset_counts();
        DYN_FTRACE_TEST_NAME();
        DYN_FTRACE_TEST_NAME();

        if (trace_selftest_test_probe1_cnt ||
            trace_selftest_test_probe2_cnt ||
            trace_selftest_test_probe3_cnt ||
            trace_selftest_test_global_cnt ||
            trace_selftest_test_dyn_cnt)
                ret = -1;

        ftrace_enabled = save_ftrace_enabled;

        return ret;
}

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                                                  struct trace_array *tr,
                                                  int (*func)(void))
{
        int save_ftrace_enabled = ftrace_enabled;
        unsigned long count;
        char *func_name;
        int ret;

        /* The ftrace test PASSED */
        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* passed in by parameter to keep gcc from optimizing it away */
        func();

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

        /* filter only on our function */
        ftrace_set_global_filter(func_name, strlen(func_name), 1);

        /* enable tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* we should have nothing in the buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);
        if (ret)
                goto out;

        if (count) {
                ret = -1;
                printk(KERN_CONT ".. filter did not filter .. ");
                goto out;
        }

        /* call our function again */
        func();

        /* sleep again */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);

        ftrace_enabled = 1;
        tracing_start();

        /* we should only have one item */
        if (!ret && count != 1) {
                trace->reset(tr);
                printk(KERN_CONT ".. filter failed count=%ld ..", count);
                ret = -1;
                goto out;
        }

        /* Test the ops with global tracing running */
        ret = trace_selftest_ops(tr, 1);
        trace->reset(tr);

 out:
        ftrace_enabled = save_ftrace_enabled;

        /* Enable tracing on all functions again */
        ftrace_set_global_filter(NULL, 0, 1);

        /* Test the ops with global tracing off */
        if (!ret)
                ret = trace_selftest_ops(tr, 2);

        return ret;
}

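/*
 * The next two probes check ftrace's recursion handling.  The first is
 * registered without the RECURSION_SAFE flag, so the ftrace core must
 * break the recursion when the probe calls the traced function again.
 * The second claims to handle recursion itself, so it should
 * legitimately reenter itself exactly once.
 */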
static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
                                               unsigned long pip,
                                               struct ftrace_ops *op,
                                               struct pt_regs *pt_regs)
{
        /*
         * This function is registered without the recursion safe flag.
         * The ftrace infrastructure should provide the recursion
         * protection. If not, this will crash the kernel!
         */
        if (trace_selftest_recursion_cnt++ > 10)
                return;
        DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
                                                    unsigned long pip,
                                                    struct ftrace_ops *op,
                                                    struct pt_regs *pt_regs)
{
        /*
         * We said we would provide our own recursion protection. By
         * calling this function again, we should recurse back into this
         * function and count again. But this only happens if the arch
         * supports all of the ftrace features and nothing else is using
         * the function tracing utility.
         */
        if (trace_selftest_recursion_cnt++)
                return;
        DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
        .func = trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
        .func = trace_selftest_test_recursion_safe_func,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static int
trace_selftest_function_recursion(void)
{
        int save_ftrace_enabled = ftrace_enabled;
        char *func_name;
        int len;
        int ret;

        /* The previous test PASSED */
        pr_cont("PASSED\n");
        pr_info("Testing ftrace recursion: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* Handle PPC64 '.' name */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
        if (ret) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_rec_probe);
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_rec_probe);

        ret = -1;
        if (trace_selftest_recursion_cnt != 1) {
                pr_cont("*callback not called once (%d)* ",
                        trace_selftest_recursion_cnt);
                goto out;
        }

        trace_selftest_recursion_cnt = 1;

        pr_cont("PASSED\n");
        pr_info("Testing ftrace recursion safe: ");

        ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
        if (ret) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_recsafe_probe);
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_recsafe_probe);

        ret = -1;
        if (trace_selftest_recursion_cnt != 2) {
                pr_cont("*callback not called expected 2 times (%d)* ",
                        trace_selftest_recursion_cnt);
                goto out;
        }

        ret = 0;
out:
        ftrace_enabled = save_ftrace_enabled;

        return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

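/*
 * Check the FTRACE_OPS_FL_SAVE_REGS handshake: a probe that demands
 * saved registers must be rejected on architectures that cannot supply
 * them, and when the probe does run, its pt_regs argument must be
 * non-NULL exactly when the architecture supports register saving.
 */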
static enum {
        TRACE_SELFTEST_REGS_START,
        TRACE_SELFTEST_REGS_FOUND,
        TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
                                          unsigned long pip,
                                          struct ftrace_ops *op,
                                          struct pt_regs *pt_regs)
{
        if (pt_regs)
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
        else
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
        .func = trace_selftest_test_regs_func,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};

static int
trace_selftest_function_regs(void)
{
        int save_ftrace_enabled = ftrace_enabled;
        char *func_name;
        int len;
        int ret;
        int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        supported = 1;
#endif

        /* The previous test PASSED */
        pr_cont("PASSED\n");
        pr_info("Testing ftrace regs%s: ",
                !supported ? "(no arch support)" : "");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* Handle PPC64 '.' name */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
        /*
         * If DYNAMIC_FTRACE is not set, then we just trace all functions.
         * This test really doesn't care.
         */
        if (ret && ret != -ENODEV) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_regs_probe);
        /*
         * Now if the arch does not support passing regs, then this should
         * have failed.
         */
        if (!supported) {
                if (!ret) {
                        pr_cont("*registered save-regs without arch support* ");
                        goto out;
                }
                test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
                ret = register_ftrace_function(&test_regs_probe);
        }
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_regs_probe);

        ret = -1;

        switch (trace_selftest_regs_stat) {
        case TRACE_SELFTEST_REGS_START:
                pr_cont("*callback never called* ");
                goto out;

        case TRACE_SELFTEST_REGS_FOUND:
                if (supported)
                        break;
                pr_cont("*callback received regs without arch support* ");
                goto out;

        case TRACE_SELFTEST_REGS_NOT_FOUND:
                if (!supported)
                        break;
                pr_cont("*callback received NULL regs* ");
                goto out;
        }

        ret = 0;
out:
        ftrace_enabled = save_ftrace_enabled;

        return ret;
}

/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
        int save_ftrace_enabled = ftrace_enabled;
        unsigned long count;
        int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
        if (ftrace_filter_param) {
                printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
                return 0;
        }
#endif

        /* make sure msleep has been recorded */
        msleep(1);

        /* start the tracing */
        ftrace_enabled = 1;

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);

        ftrace_enabled = 1;
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                                                     DYN_FTRACE_TEST_NAME);
        if (ret)
                goto out;

        ret = trace_selftest_function_recursion();
        if (ret)
                goto out;

        ret = trace_selftest_function_regs();
 out:
        ftrace_enabled = save_ftrace_enabled;

        /* kill ftrace totally if we failed */
        if (ret)
                ftrace_kill();

        return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST 100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
        /* This is harmlessly racy, we want to approximately detect a hang */
        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
                ftrace_graph_stop();
                printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
                if (ftrace_dump_on_oops) {
                        ftrace_dump(DUMP_ALL);
                        /* ftrace_dump() disables tracing */
                        tracing_on();
                }
                return 0;
        }

        return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer test, from which this
 * selftest has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
                                      struct trace_array *tr)
{
        int ret;
        unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
        if (ftrace_filter_param) {
                printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
                return 0;
        }
#endif

        /*
         * Simulate the init() callback but we attach a watchdog callback
         * to detect and recover from possible hangs
         */
        tracing_reset_online_cpus(&tr->trace_buffer);
        set_graph_array(tr);
        ret = register_ftrace_graph(&trace_graph_return,
                                    &trace_graph_entry_watchdog);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }
        tracing_start_cmdline_record();

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* Have we just recovered from a hang? */
        if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
                tracing_selftest_disabled = true;
                ret = -1;
                goto out;
        }

        tracing_stop();

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);

        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* Don't test dynamic tracing, the function tracer already did */

out:
        /* Stop it if we failed */
        if (ret)
                ftrace_graph_stop();

        return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

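/*
 * The irqsoff, preemptoff, and preemptirqsoff selftests below all
 * follow the same pattern: run a short (~100us) critical section with
 * the corresponding protection disabled, then check that both the live
 * trace buffer and the max-latency snapshot buffer contain sane
 * entries.
 */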
#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;
        /* disable interrupts for a bit */
        local_irq_disable();
        udelay(100);
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max irqs off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;
        /* disable preemption for a bit */
        preempt_disable();
        udelay(100);
        preempt_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max preempt off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out_no_start;
        }

        /* reset the max latency */
        tr->max_latency = 0;

        /* disable preemption and interrupts for a bit */
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max irqs/preempt off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&tr->max_buffer, &count);
        if (ret)
                goto out;

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* do the test by disabling interrupts first this time */
        tr->max_latency = 0;
        tracing_start();
        trace->start(tr);

        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&tr->max_buffer, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

out:
        tracing_start();
out_no_start:
        trace->reset(tr);
        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
        /* What could possibly go wrong? */
        return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

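/*
 * The wakeup tracer records the latency of waking the highest priority
 * task.  A SCHED_DEADLINE thread is used as the wakeup target so that
 * nothing else on the system can outrank it, and the completion/go
 * handshake below keeps the test and the thread in lockstep around the
 * measured wakeup.
 */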
struct wakeup_test_data {
        struct completion is_ready;
        int go;
};

static int trace_wakeup_test_thread(void *data)
{
        /* Make this a -deadline thread */
        static const struct sched_attr attr = {
                .sched_policy = SCHED_DEADLINE,
                .sched_runtime = 100000ULL,
                .sched_deadline = 10000000ULL,
                .sched_period = 10000000ULL
        };
        struct wakeup_test_data *x = data;

        sched_setattr(current, &attr);

        /* Make it known that we are running at the new prio */
        complete(&x->is_ready);

        /* now go to sleep and let the test wake us up */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!x->go) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }

        complete(&x->is_ready);

        set_current_state(TASK_INTERRUPTIBLE);

        /* we are awake, now wait to disappear */
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }

        __set_current_state(TASK_RUNNING);

        return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        struct task_struct *p;
        struct wakeup_test_data data;
        unsigned long count;
        int ret;

        memset(&data, 0, sizeof(data));

        init_completion(&data.is_ready);

        /* create a -deadline thread */
        p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
        if (IS_ERR(p)) {
                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
                return -1;
        }

        /* make sure the thread is running at -deadline policy */
        wait_for_completion(&data.is_ready);

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;

        while (p->on_rq) {
                /*
                 * Sleep to make sure the -deadline thread is asleep too.
                 * On virtual machines we can't rely on timings,
                 * but we want to make sure this test still works.
                 */
                msleep(100);
        }

        init_completion(&data.is_ready);

        data.go = 1;
        /* memory barrier is in the wake_up_process() */

        wake_up_process(p);

        /* Wait for the task to wake up */
        wait_for_completion(&data.is_ready);

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->trace_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);

        trace->reset(tr);
        tracing_start();

        tr->max_latency = save_max;

        /* kill the thread */
        kthread_stop(p);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(&tr->trace_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_BRANCH_TRACER */