/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a short lag before every CPU stops
 * calling the old function, since nothing here synchronizes.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	if (ftrace_trace_function == ftrace_stub)
		goto out;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif

 out:
	spin_unlock(&ftrace_lock);
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;


#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}


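/*
 * Summary of the decision logic below: the record's NOTRACE, FILTER
 * and ENABLED flag bits are compared against the requested action
 * (@enable), ENABLED is toggled only when the state actually has to
 * change, and the mcount call site is then patched to match.  A
 * return of 0 means the site was already in the desired state.
 */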
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 *
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered or enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
		/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {

			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}

static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else
					ftrace_bug(failed, rec->ip);
			}
		}
	}
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

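/*
 * ftrace_start_up counts how many users currently want the function
 * tracer active.  ftrace_startup() bumps the count and patches the
 * call sites in; ftrace_shutdown() only patches them back to NOPs
 * once the count drops to zero again.
 */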
static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);

	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
	FTRACE_ITER_PRINTALL	= (1 << 4),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	int			idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return NULL;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		} else {
			iter->idx = -1;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	return rec;
}

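/*
 * t_start() runs every time the seq_file read is (re)started.  For a
 * non-zero *pos it rewinds the iterator by one record and replays
 * t_next(), so a read that resumes mid-way continues with the record
 * that follows the last one already shown.
 */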
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
		if (*pos > 0)
			return NULL;
		iter->flags |= FTRACE_ITER_PRINTALL;
		(*pos)++;
		return iter;
	}

	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}


static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

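/*
 * Match @buff against the symbol names of all records and set (or,
 * with a leading '!', clear) the FILTER/NOTRACE flag.  The glob forms
 * follow from how '*' is parsed below:
 *   "foo"   - MATCH_FULL: exact match
 *   "foo*"  - MATCH_FRONT_ONLY: symbols starting with "foo"
 *   "*foo"  - MATCH_END_ONLY: symbols ending with "foo"
 *   "*foo*" - MATCH_MIDDLE_ONLY: symbols containing "foo"
 */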
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;
	int not = 0;

	if (buff[0] == '!') {
		not = 1;
		buff++;
		len--;
	}

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched) {
				if (not)
					rec->flags &= ~flag;
				else
					rec->flags |= flag;
			}
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

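/*
 * Writes to set_ftrace_filter/set_ftrace_notrace are parsed as
 * whitespace-separated patterns.  A write that ends in the middle of
 * a pattern sets FTRACE_ITER_CONT so that the next write, or the
 * final release of the file, completes the token before matching.
 */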
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;


	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *array = m->private;
	int index = *pos;

	(*pos)++;

	if (index >= ftrace_graph_count)
		return NULL;

	return &array[index];
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	void *p = NULL;

	mutex_lock(&graph_lock);

	p = g_next(m, p, pos);

	return p;
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;
	char str[KSYM_SYMBOL_LEN];

	if (!ptr)
		return 0;

	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = ftrace_graph_funcs;
		}
	} else
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}

static ssize_t
ftrace_graph_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

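/*
 * Resolve @buffer to the address of a traced function and store it in
 * @array[@idx].  An address that is already present in @array, or a
 * name that matches no record, makes the lookup fail with -EINVAL.
 */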
1448static int
1449ftrace_set_func(unsigned long *array, int idx, char *buffer)
1450{
1451 char str[KSYM_SYMBOL_LEN];
1452 struct dyn_ftrace *rec;
1453 struct ftrace_page *pg;
1454 int found = 0;
Liming Wangfaec2ec2008-12-04 14:24:49 +08001455 int i, j;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05001456
1457 if (ftrace_disabled)
1458 return -ENODEV;
1459
1460 /* should not be called from interrupt context */
1461 spin_lock(&ftrace_lock);
1462
1463 for (pg = ftrace_pages_start; pg; pg = pg->next) {
1464 for (i = 0; i < pg->index; i++) {
1465 rec = &pg->records[i];
1466
1467 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
1468 continue;
1469
1470 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1471 if (strcmp(str, buffer) == 0) {
1472 found = 1;
Liming Wangfaec2ec2008-12-04 14:24:49 +08001473 for (j = 0; j < idx; j++)
1474 if (array[j] == rec->ip) {
1475 found = 0;
1476 break;
1477 }
1478 if (found)
1479 array[idx] = rec->ip;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05001480 break;
1481 }
1482 }
1483 }
1484 spin_unlock(&ftrace_lock);
1485
1486 return found ? 0 : -EINVAL;
1487}
1488
1489static ssize_t
1490ftrace_graph_write(struct file *file, const char __user *ubuf,
1491 size_t cnt, loff_t *ppos)
1492{
1493 unsigned char buffer[FTRACE_BUFF_MAX+1];
1494 unsigned long *array;
1495 size_t read = 0;
1496 ssize_t ret;
1497 int index = 0;
1498 char ch;
1499
1500 if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		*ppos += read;
		ret = read;
		goto out;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one at a time */
	ret = ftrace_set_func(array, ftrace_graph_count, buffer);
	if (ret)
		goto out;

	ftrace_graph_count++;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = ftrace_graph_read,
	.write = ftrace_graph_write,
};
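
/*
 * A minimal usage sketch of the interface above (not part of the original
 * code; the debugfs mount point is an assumption and may differ):
 *
 *	# graph-trace only do_IRQ and its children
 *	echo do_IRQ > /sys/kernel/debug/tracing/set_graph_function
 *
 * Each write adds exactly one function (see ftrace_set_func() above);
 * opening the file for writing without O_APPEND clears the filter list.
 */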
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}

static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_start_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_start_lock);

	return cnt;
}

static struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};
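
/*
 * Usage sketch for the set_ftrace_pid file created in ftrace_init_debugfs()
 * below (not in the original source; the debugfs mount point is an
 * assumption):
 *
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid   # trace pid 1234 only
 *	echo 0    > /sys/kernel/debug/tracing/set_ftrace_pid   # trace the idle (swapper) tasks
 *	echo -1   > /sys/kernel/debug/tracing/set_ftrace_pid   # trace all tasks again
 */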

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way; it is safe to call from atomic context.
 * To stop tracing cleanly from a non-atomic section, unregister the
 * tracer with unregister_ftrace_function() instead.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
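
/*
 * Illustrative sketch of a caller (not part of the original file, kept
 * under #if 0 so it is never built): my_trace_func and my_trace_ops are
 * hypothetical names, and the two-argument callback signature assumes
 * the ftrace_func_t typedef of this kernel version.
 */
#if 0
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* keep this cheap; anything traceable called from here would recurse */
}

static struct ftrace_ops my_trace_ops __read_mostly =
{
	.func = my_trace_func,
};

static int __init my_tracer_init(void)
{
	/* hooks my_trace_func into every traced function entry */
	return register_ftrace_function(&my_trace_ops);
}

static void __exit my_tracer_exit(void)
{
	unregister_ftrace_function(&my_trace_ops);
}
#endif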

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
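
/*
 * The function above backs the kernel.ftrace_enabled sysctl.  A usage
 * sketch (not in the original source; the path assumes procfs is mounted
 * on /proc):
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled    # stop all function tracing
 *	echo 1 > /proc/sys/kernel/ftrace_enabled    # allow it again
 */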

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				 sizeof(struct ftrace_ret_stack *),
				 GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk, so protect against tracing it then.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
			     void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			  trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
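
/*
 * Illustrative sketch of a graph-tracer client (not part of the original
 * file, kept under #if 0 so it is never built): my_graph_entry and
 * my_graph_return are hypothetical names, and the entry handler's
 * non-zero return value is assumed to mean "trace this call".
 */
#if 0
static int notrace my_graph_entry(struct ftrace_graph_ent *trace)
{
	/* returning 0 would skip tracing this function call */
	return 1;
}

static void notrace my_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime gives the call's duration */
}

static int __init my_graph_init(void)
{
	/* return-callback first, entry-callback second, as declared above */
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}
#endif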

void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

	mutex_unlock(&ftrace_sysctl_lock);
}

/* Allocate a return stack for a newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}
#endif