/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test ftrace_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

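/*
 * Add @ops to the list of callbacks called from mcount and pick the
 * fastest dispatch: a single registered ops is called directly, more
 * than one goes through ftrace_list_func(), and pid filtering wraps
 * the chosen function with ftrace_pid_func().
 */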
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

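/*
 * Re-evaluate whether the current trace function needs the pid-filter
 * wrapper after ftrace_pid_trace changes, leaving the stub untouched.
 */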
static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	if (ftrace_trace_function == ftrace_stub)
		goto out;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif

 out:
	spin_unlock(&ftrace_lock);
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;


#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}

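/*
 * Decide whether this record's mcount site should be a call or a nop,
 * based on the notrace/filter flags and @enable, then patch the site
 * accordingly.  Returns 0 or the error from the arch patching code.
 */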
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

	ftrace_addr = (unsigned long)ftrace_caller;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 *
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered and is not enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
		/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {

			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}

static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else
					ftrace_bug(failed, rec->ip);
			}
		}
	}
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

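/* Apply the requested low-level updates; called via stop_machine(). */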
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);

	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

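/*
 * Convert all newly recorded mcount call sites on the ftrace_new_addrs
 * list into nops, and account how many records were updated and how
 * long the conversion took.
 */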
static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e., patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	int			idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

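/* seq_file iterator over every dyn_ftrace record in the ftrace pages */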
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		} else {
			iter->idx = -1;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}


static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

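/*
 * Parse a simple glob ('*' at the front, middle or end, '!' to negate)
 * and set or clear the FILTER/NOTRACE flag on every matching record.
 */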
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;
	int not = 0;

	if (buff[0] == '!') {
		not = 1;
		buff++;
		len--;
	}

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched) {
				if (not)
					rec->flags &= ~flag;
				else
					rec->flags |= flag;
			}
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *array = m->private;
	int index = *pos;

	(*pos)++;

	if (index >= ftrace_graph_count)
		return NULL;

	return &array[index];
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	void *p = NULL;

	mutex_lock(&graph_lock);

	p = g_next(m, p, pos);

	return p;
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;
	char str[KSYM_SYMBOL_LEN];

	if (!ptr)
		return 0;

	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = ftrace_graph_funcs;
		}
	} else
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}

static ssize_t
ftrace_graph_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

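/*
 * Look up @buffer among the recorded mcount sites and store its address
 * at array[idx] for the graph filter, rejecting duplicates already in
 * the array.
 */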
static int
ftrace_set_func(unsigned long *array, int idx, char *buffer)
{
	char str[KSYM_SYMBOL_LEN];
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int found = 0;
	int i, j;

	if (ftrace_disabled)
		return -ENODEV;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
				continue;

			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			if (strcmp(str, buffer) == 0) {
				found = 1;
				for (j = 0; j < idx; j++)
					if (array[j] == rec->ip) {
						found = 0;
						break;
					}
				if (found)
					array[idx] = rec->ip;
				break;
			}
		}
	}
	spin_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}

static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned long *array;
	size_t read = 0;
	ssize_t ret;
	int index = 0;
	char ch;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		*ppos += read;
		ret = read;
		goto out;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one at a time */
	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
	if (ret)
		goto out;

	ftrace_graph_count++;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = ftrace_graph_read,
	.write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}

Steven Rostedt31e88902008-11-14 16:21:19 -08001590static int ftrace_convert_nops(struct module *mod,
1591 unsigned long *start,
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001592 unsigned long *end)
1593{
1594 unsigned long *p;
1595 unsigned long addr;
1596 unsigned long flags;
1597
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001598 mutex_lock(&ftrace_start_lock);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001599 p = start;
1600 while (p < end) {
1601 addr = ftrace_call_adjust(*p++);
Steven Rostedt20e52272008-11-14 16:21:19 -08001602 /*
1603 * Some architecture linkers will pad between
1604 * the different mcount_loc sections of different
1605 * object files to satisfy alignments.
1606 * Skip any NULL pointers.
1607 */
1608 if (!addr)
1609 continue;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001610 ftrace_record_ip(addr);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001611 }
1612
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001613	/* disable interrupts to prevent kstop_machine */
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001614 local_irq_save(flags);
Steven Rostedt31e88902008-11-14 16:21:19 -08001615 ftrace_update_code(mod);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001616 local_irq_restore(flags);
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001617 mutex_unlock(&ftrace_start_lock);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001618
1619 return 0;
1620}
1621
Steven Rostedt31e88902008-11-14 16:21:19 -08001622void ftrace_init_module(struct module *mod,
1623 unsigned long *start, unsigned long *end)
Steven Rostedt90d595f2008-08-14 15:45:09 -04001624{
Steven Rostedt00fd61a2008-08-15 21:40:04 -04001625 if (ftrace_disabled || start == end)
Steven Rostedtfed19392008-08-14 22:47:19 -04001626 return;
Steven Rostedt31e88902008-11-14 16:21:19 -08001627 ftrace_convert_nops(mod, start, end);
Steven Rostedt90d595f2008-08-14 15:45:09 -04001628}
1629
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001630extern unsigned long __start_mcount_loc[];
1631extern unsigned long __stop_mcount_loc[];
1632
1633void __init ftrace_init(void)
1634{
1635 unsigned long count, addr, flags;
1636 int ret;
1637
1638 /* Keep the ftrace pointer to the stub */
1639 addr = (unsigned long)ftrace_stub;
1640
1641 local_irq_save(flags);
1642 ftrace_dyn_arch_init(&addr);
1643 local_irq_restore(flags);
1644
1645 /* ftrace_dyn_arch_init places the return code in addr */
1646 if (addr)
1647 goto failed;
1648
1649 count = __stop_mcount_loc - __start_mcount_loc;
1650
1651 ret = ftrace_dyn_table_alloc(count);
1652 if (ret)
1653 goto failed;
1654
1655 last_ftrace_enabled = ftrace_enabled = 1;
1656
Steven Rostedt31e88902008-11-14 16:21:19 -08001657 ret = ftrace_convert_nops(NULL,
1658 __start_mcount_loc,
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001659 __stop_mcount_loc);
1660
1661 return;
1662 failed:
1663 ftrace_disabled = 1;
1664}
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001665
Steven Rostedt3d083392008-05-12 21:20:42 +02001666#else
Frederic Weisbecker0b6e4d52008-10-28 20:17:38 +01001667
1668static int __init ftrace_nodyn_init(void)
1669{
1670 ftrace_enabled = 1;
1671 return 0;
1672}
1673device_initcall(ftrace_nodyn_init);
1674
Steven Rostedtdf4fc312008-11-26 00:16:23 -05001675static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
1676static inline void ftrace_startup_enable(int command) { }
Steven Rostedt5a45cfe2008-11-26 00:16:24 -05001677/* Keep as macros so we do not need to define the commands */
1678# define ftrace_startup(command) do { } while (0)
1679# define ftrace_shutdown(command) do { } while (0)
Ingo Molnarc7aafc52008-05-12 21:20:45 +02001680# define ftrace_startup_sysctl() do { } while (0)
1681# define ftrace_shutdown_sysctl() do { } while (0)
Steven Rostedt3d083392008-05-12 21:20:42 +02001682#endif /* CONFIG_DYNAMIC_FTRACE */
1683
Steven Rostedtdf4fc312008-11-26 00:16:23 -05001684static ssize_t
1685ftrace_pid_read(struct file *file, char __user *ubuf,
1686 size_t cnt, loff_t *ppos)
1687{
1688 char buf[64];
1689 int r;
1690
Steven Rostedte32d8952008-12-04 00:26:41 -05001691 if (ftrace_pid_trace == ftrace_swapper_pid)
1692 r = sprintf(buf, "swapper tasks\n");
1693 else if (ftrace_pid_trace)
Steven Rostedt978f3a42008-12-04 00:26:40 -05001694 r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
Steven Rostedtdf4fc312008-11-26 00:16:23 -05001695 else
1696 r = sprintf(buf, "no pid\n");
1697
1698 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1699}
1700
Steven Rostedte32d8952008-12-04 00:26:41 -05001701static void clear_ftrace_swapper(void)
1702{
1703 struct task_struct *p;
1704 int cpu;
1705
1706 get_online_cpus();
1707 for_each_online_cpu(cpu) {
1708 p = idle_task(cpu);
1709 clear_tsk_trace_trace(p);
1710 }
1711 put_online_cpus();
1712}
1713
1714static void set_ftrace_swapper(void)
1715{
1716 struct task_struct *p;
1717 int cpu;
1718
1719 get_online_cpus();
1720 for_each_online_cpu(cpu) {
1721 p = idle_task(cpu);
1722 set_tsk_trace_trace(p);
1723 }
1724 put_online_cpus();
1725}
1726
1727static void clear_ftrace_pid(struct pid *pid)
Steven Rostedt978f3a42008-12-04 00:26:40 -05001728{
1729 struct task_struct *p;
1730
Steven Rostedte32d8952008-12-04 00:26:41 -05001731 do_each_pid_task(pid, PIDTYPE_PID, p) {
Steven Rostedt978f3a42008-12-04 00:26:40 -05001732 clear_tsk_trace_trace(p);
Steven Rostedte32d8952008-12-04 00:26:41 -05001733 } while_each_pid_task(pid, PIDTYPE_PID, p);
1734 put_pid(pid);
Steven Rostedt978f3a42008-12-04 00:26:40 -05001735}
1736
Steven Rostedte32d8952008-12-04 00:26:41 -05001737static void set_ftrace_pid(struct pid *pid)
Steven Rostedt978f3a42008-12-04 00:26:40 -05001738{
1739 struct task_struct *p;
1740
1741 do_each_pid_task(pid, PIDTYPE_PID, p) {
1742 set_tsk_trace_trace(p);
1743 } while_each_pid_task(pid, PIDTYPE_PID, p);
1744}
1745
Steven Rostedte32d8952008-12-04 00:26:41 -05001746static void clear_ftrace_pid_task(struct pid **pid)
1747{
1748 if (*pid == ftrace_swapper_pid)
1749 clear_ftrace_swapper();
1750 else
1751 clear_ftrace_pid(*pid);
1752
1753 *pid = NULL;
1754}
1755
1756static void set_ftrace_pid_task(struct pid *pid)
1757{
1758 if (pid == ftrace_swapper_pid)
1759 set_ftrace_swapper();
1760 else
1761 set_ftrace_pid(pid);
1762}
1763
Steven Rostedtdf4fc312008-11-26 00:16:23 -05001764static ssize_t
1765ftrace_pid_write(struct file *filp, const char __user *ubuf,
1766 size_t cnt, loff_t *ppos)
1767{
Steven Rostedt978f3a42008-12-04 00:26:40 -05001768 struct pid *pid;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05001769 char buf[64];
1770 long val;
1771 int ret;
1772
1773 if (cnt >= sizeof(buf))
1774 return -EINVAL;
1775
1776 if (copy_from_user(&buf, ubuf, cnt))
1777 return -EFAULT;
1778
1779 buf[cnt] = 0;
1780
1781 ret = strict_strtol(buf, 10, &val);
1782 if (ret < 0)
1783 return ret;
1784
1785 mutex_lock(&ftrace_start_lock);
Steven Rostedt978f3a42008-12-04 00:26:40 -05001786 if (val < 0) {
Steven Rostedtdf4fc312008-11-26 00:16:23 -05001787 /* disable pid tracing */
Steven Rostedt978f3a42008-12-04 00:26:40 -05001788 if (!ftrace_pid_trace)
Steven Rostedtdf4fc312008-11-26 00:16:23 -05001789 goto out;
Steven Rostedt978f3a42008-12-04 00:26:40 -05001790
1791 clear_ftrace_pid_task(&ftrace_pid_trace);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05001792
1793 } else {
Steven Rostedte32d8952008-12-04 00:26:41 -05001794 /* swapper task is special */
1795 if (!val) {
1796 pid = ftrace_swapper_pid;
1797 if (pid == ftrace_pid_trace)
1798 goto out;
1799 } else {
1800 pid = find_get_pid(val);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05001801
Steven Rostedte32d8952008-12-04 00:26:41 -05001802 if (pid == ftrace_pid_trace) {
1803 put_pid(pid);
1804 goto out;
1805 }
Steven Rostedt978f3a42008-12-04 00:26:40 -05001806 }
1807
1808 if (ftrace_pid_trace)
1809 clear_ftrace_pid_task(&ftrace_pid_trace);
1810
1811 if (!pid)
Steven Rostedtdf4fc312008-11-26 00:16:23 -05001812 goto out;
1813
Steven Rostedt978f3a42008-12-04 00:26:40 -05001814 ftrace_pid_trace = pid;
Steven Rostedt0ef8cde2008-12-03 15:36:58 -05001815
Steven Rostedt978f3a42008-12-04 00:26:40 -05001816 set_ftrace_pid_task(ftrace_pid_trace);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05001817 }
1818
1819	/* update the function call to honor the new pid setting */
1820 ftrace_update_pid_func();
1821 ftrace_startup_enable(0);
1822
1823 out:
1824 mutex_unlock(&ftrace_start_lock);
1825
1826 return cnt;
1827}
1828
1829static const struct file_operations ftrace_pid_fops = {
1830 .read = ftrace_pid_read,
1831 .write = ftrace_pid_write,
1832};
1833
1834static __init int ftrace_init_debugfs(void)
1835{
1836 struct dentry *d_tracer;
1837 struct dentry *entry;
1838
1839 d_tracer = tracing_init_dentry();
1840 if (!d_tracer)
1841 return 0;
1842
1843 ftrace_init_dyn_debugfs(d_tracer);
1844
1845 entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
1846 NULL, &ftrace_pid_fops);
1847 if (!entry)
1848 pr_warning("Could not create debugfs "
1849 "'set_ftrace_pid' entry\n");
1850 return 0;
1851}
1852
1853fs_initcall(ftrace_init_debugfs);
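/*
 * Usage sketch (illustrative only, not part of the build): the
 * "set_ftrace_pid" file created above takes a decimal PID, 0 for the
 * per-cpu swapper/idle tasks, or a negative value to turn pid filtering
 * off again.  The debugfs mount point is an assumption.
 */
#if 0
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/set_ftrace_pid", "w");

	if (!f) {
		perror("set_ftrace_pid");
		return 1;
	}
	/* Restrict function tracing to the current process. */
	fprintf(f, "%d\n", getpid());
	fclose(f);
	return 0;
}
#endif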
1854
Steven Rostedt3d083392008-05-12 21:20:42 +02001855/**
Steven Rostedt81adbdc2008-10-23 09:33:02 -04001856 * ftrace_kill - kill ftrace
Steven Rostedta2bb6a32008-07-10 20:58:15 -04001857 *
1858 * This function should be used by panic code. It stops ftrace
1859 * but in a not so nice way. If you need to stop tracing
1860 * cleanly, use unregister_ftrace_function() instead.
1861 */
Steven Rostedt81adbdc2008-10-23 09:33:02 -04001862void ftrace_kill(void)
Steven Rostedta2bb6a32008-07-10 20:58:15 -04001863{
1864 ftrace_disabled = 1;
1865 ftrace_enabled = 0;
Steven Rostedta2bb6a32008-07-10 20:58:15 -04001866 clear_ftrace_function();
1867}
1868
1869/**
Steven Rostedt3d083392008-05-12 21:20:42 +02001870 * register_ftrace_function - register a function for profiling
1871 * @ops - ops structure that holds the function for profiling.
1872 *
1873 * Register a function to be called by all functions in the
1874 * kernel.
1875 *
1876 * Note: @ops->func and all the functions it calls must be labeled
1877 * with "notrace", otherwise it will go into a
1878 * recursive loop.
1879 */
1880int register_ftrace_function(struct ftrace_ops *ops)
1881{
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001882 int ret;
1883
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001884 if (unlikely(ftrace_disabled))
1885 return -1;
1886
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001887 mutex_lock(&ftrace_sysctl_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01001888
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001889 ret = __register_ftrace_function(ops);
Steven Rostedt5a45cfe2008-11-26 00:16:24 -05001890 ftrace_startup(0);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001891
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01001892 mutex_unlock(&ftrace_sysctl_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001893 return ret;
Steven Rostedt3d083392008-05-12 21:20:42 +02001894}
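/*
 * Minimal caller sketch (illustrative only, not compiled here): the
 * callback and ops names are hypothetical.  As noted above, the callback
 * must be notrace; it receives the address of the traced function and of
 * its caller.
 */
#if 0
static void notrace my_trace_callback(unsigned long ip,
				      unsigned long parent_ip)
{
	/* Keep this minimal: it runs on every traced function call. */
}

static struct ftrace_ops my_trace_ops __read_mostly =
{
	.func = my_trace_callback,
};

static int __init my_tracer_init(void)
{
	return register_ftrace_function(&my_trace_ops);
}
#endif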
1895
1896/**
1897 * unregister_ftrace_function - unregister a function for profiling.
1898 * @ops - ops structure that holds the function to unregister
1899 *
1900 * Unregister a function that was added to be called by ftrace profiling.
1901 */
1902int unregister_ftrace_function(struct ftrace_ops *ops)
1903{
1904 int ret;
1905
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001906 mutex_lock(&ftrace_sysctl_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02001907 ret = __unregister_ftrace_function(ops);
Steven Rostedt5a45cfe2008-11-26 00:16:24 -05001908 ftrace_shutdown(0);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001909 mutex_unlock(&ftrace_sysctl_lock);
1910
1911 return ret;
1912}
1913
Ingo Molnare309b412008-05-12 21:20:51 +02001914int
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001915ftrace_enable_sysctl(struct ctl_table *table, int write,
Steven Rostedt5072c592008-05-12 21:20:43 +02001916 struct file *file, void __user *buffer, size_t *lenp,
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001917 loff_t *ppos)
1918{
1919 int ret;
1920
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001921 if (unlikely(ftrace_disabled))
1922 return -ENODEV;
1923
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001924 mutex_lock(&ftrace_sysctl_lock);
1925
Steven Rostedt5072c592008-05-12 21:20:43 +02001926 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001927
1928 if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1929 goto out;
1930
1931 last_ftrace_enabled = ftrace_enabled;
1932
1933 if (ftrace_enabled) {
1934
1935 ftrace_startup_sysctl();
1936
1937 /* we are starting ftrace again */
1938 if (ftrace_list != &ftrace_list_end) {
1939 if (ftrace_list->next == &ftrace_list_end)
1940 ftrace_trace_function = ftrace_list->func;
1941 else
1942 ftrace_trace_function = ftrace_list_func;
1943 }
1944
1945 } else {
1946 /* stopping ftrace calls (just send to ftrace_stub) */
1947 ftrace_trace_function = ftrace_stub;
1948
1949 ftrace_shutdown_sysctl();
1950 }
1951
1952 out:
1953 mutex_unlock(&ftrace_sysctl_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02001954 return ret;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +02001955}
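/*
 * Usage sketch (illustrative only, not part of the build): the handler
 * above backs the "ftrace_enabled" sysctl, so function tracing can be
 * switched off and back on from user space.  The conventional proc path
 * is assumed here.
 */
#if 0
#include <stdio.h>

static int set_ftrace_enabled(int on)
{
	FILE *f = fopen("/proc/sys/kernel/ftrace_enabled", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", on ? 1 : 0);
	return fclose(f);
}
#endif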
Ingo Molnarf17845e2008-10-24 12:47:10 +02001956
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01001957#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01001958
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01001959static atomic_t ftrace_graph_active;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01001960
Steven Rostedte49dc192008-12-02 23:50:05 -05001961int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
1962{
1963 return 0;
1964}
1965
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01001966/* The callbacks that hook a function */
1967trace_func_graph_ret_t ftrace_graph_return =
1968 (trace_func_graph_ret_t)ftrace_stub;
Steven Rostedte49dc192008-12-02 23:50:05 -05001969trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01001970
1971/* Try to assign return stacks to FTRACE_RETSTACK_ALLOC_SIZE tasks at a time. */
1972static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
1973{
1974 int i;
1975 int ret = 0;
1976 unsigned long flags;
1977 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
1978 struct task_struct *g, *t;
1979
1980 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
1981 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
1982 * sizeof(struct ftrace_ret_stack),
1983 GFP_KERNEL);
1984 if (!ret_stack_list[i]) {
1985 start = 0;
1986 end = i;
1987 ret = -ENOMEM;
1988 goto free;
1989 }
1990 }
1991
1992 read_lock_irqsave(&tasklist_lock, flags);
1993 do_each_thread(g, t) {
1994 if (start == end) {
1995 ret = -EAGAIN;
1996 goto unlock;
1997 }
1998
1999 if (t->ret_stack == NULL) {
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01002000 t->curr_ret_stack = -1;
Frederic Weisbecker48d68b22008-12-02 00:20:39 +01002001 /* Make sure IRQs see the -1 first: */
2002 barrier();
2003 t->ret_stack = ret_stack_list[start++];
Frederic Weisbecker380c4b12008-12-06 03:43:41 +01002004 atomic_set(&t->tracing_graph_pause, 0);
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01002005 atomic_set(&t->trace_overrun, 0);
2006 }
2007 } while_each_thread(g, t);
2008
2009unlock:
2010 read_unlock_irqrestore(&tasklist_lock, flags);
2011free:
2012 for (i = start; i < end; i++)
2013 kfree(ret_stack_list[i]);
2014 return ret;
2015}
2016
2017/* Allocate a return stack for each task */
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01002018static int start_graph_tracing(void)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01002019{
2020 struct ftrace_ret_stack **ret_stack_list;
2021 int ret;
2022
2023 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
2024 sizeof(struct ftrace_ret_stack *),
2025 GFP_KERNEL);
2026
2027 if (!ret_stack_list)
2028 return -ENOMEM;
2029
2030 do {
2031 ret = alloc_retstack_tasklist(ret_stack_list);
2032 } while (ret == -EAGAIN);
2033
2034 kfree(ret_stack_list);
2035 return ret;
2036}
2037
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01002038int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2039 trace_func_graph_ent_t entryfunc)
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01002040{
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01002041 int ret = 0;
2042
2043 mutex_lock(&ftrace_sysctl_lock);
2044
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01002045 atomic_inc(&ftrace_graph_active);
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01002046 ret = start_graph_tracing();
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01002047 if (ret) {
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01002048 atomic_dec(&ftrace_graph_active);
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01002049 goto out;
2050 }
Steven Rostedte53a6312008-11-26 00:16:25 -05002051
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01002052 ftrace_graph_return = retfunc;
2053 ftrace_graph_entry = entryfunc;
Steven Rostedte53a6312008-11-26 00:16:25 -05002054
Steven Rostedt5a45cfe2008-11-26 00:16:24 -05002055 ftrace_startup(FTRACE_START_FUNC_RET);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01002056
2057out:
2058 mutex_unlock(&ftrace_sysctl_lock);
2059 return ret;
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01002060}
2061
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01002062void unregister_ftrace_graph(void)
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01002063{
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01002064 mutex_lock(&ftrace_sysctl_lock);
2065
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01002066 atomic_dec(&ftrace_graph_active);
2067 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
Steven Rostedte49dc192008-12-02 23:50:05 -05002068 ftrace_graph_entry = ftrace_graph_entry_stub;
Steven Rostedt5a45cfe2008-11-26 00:16:24 -05002069 ftrace_shutdown(FTRACE_STOP_FUNC_RET);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01002070
2071 mutex_unlock(&ftrace_sysctl_lock);
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01002072}
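/*
 * Minimal graph-tracer client sketch (illustrative only, not compiled
 * here): both callbacks are hypothetical.  The entry handler returns
 * nonzero when the function should be traced and 0 to skip it; the
 * return handler runs as the traced function exits.
 */
#if 0
static int my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* trace every function entry */
}

static void my_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime gives the duration if needed */
}

static int __init my_graph_init(void)
{
	/* return-handler first, entry-handler second */
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}
#endif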
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01002073
2074/* Allocate a return stack for a newly created task */
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01002075void ftrace_graph_init_task(struct task_struct *t)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01002076{
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01002077 if (atomic_read(&ftrace_graph_active)) {
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01002078 t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
2079 * sizeof(struct ftrace_ret_stack),
2080 GFP_KERNEL);
2081 if (!t->ret_stack)
2082 return;
2083 t->curr_ret_stack = -1;
Frederic Weisbecker380c4b12008-12-06 03:43:41 +01002084 atomic_set(&t->tracing_graph_pause, 0);
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01002085 atomic_set(&t->trace_overrun, 0);
2086 } else
2087 t->ret_stack = NULL;
2088}
2089
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01002090void ftrace_graph_exit_task(struct task_struct *t)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01002091{
Frederic Weisbeckereae849c2008-11-23 17:33:12 +01002092 struct ftrace_ret_stack *ret_stack = t->ret_stack;
2093
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01002094 t->ret_stack = NULL;
Frederic Weisbeckereae849c2008-11-23 17:33:12 +01002095 /* NULL must become visible to IRQs before we free it: */
2096 barrier();
2097
2098 kfree(ret_stack);
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01002099}
Steven Rostedt14a866c2008-12-02 23:50:02 -05002100
2101void ftrace_graph_stop(void)
2102{
2103 ftrace_stop();
2104}
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01002105#endif
2106