Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +02001/*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
14 */
15
Steven Rostedt3d083392008-05-12 21:20:42 +020016#include <linux/stop_machine.h>
17#include <linux/clocksource.h>
18#include <linux/kallsyms.h>
Steven Rostedt5072c592008-05-12 21:20:43 +020019#include <linux/seq_file.h>
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -080020#include <linux/suspend.h>
Steven Rostedt5072c592008-05-12 21:20:43 +020021#include <linux/debugfs.h>
Steven Rostedt3d083392008-05-12 21:20:42 +020022#include <linux/hardirq.h>
Ingo Molnar2d8b8202008-02-23 16:55:50 +010023#include <linux/kthread.h>
Steven Rostedt5072c592008-05-12 21:20:43 +020024#include <linux/uaccess.h>
Ingo Molnar2d8b8202008-02-23 16:55:50 +010025#include <linux/ftrace.h>
Steven Rostedtb0fc4942008-05-12 21:20:43 +020026#include <linux/sysctl.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090027#include <linux/slab.h>
Steven Rostedt5072c592008-05-12 21:20:43 +020028#include <linux/ctype.h>
Steven Rostedt3d083392008-05-12 21:20:42 +020029#include <linux/list.h>
Steven Rostedt59df055f2009-02-14 15:29:06 -050030#include <linux/hash.h>
Paul E. McKenney3f379b02010-03-05 15:03:25 -080031#include <linux/rcupdate.h>
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +020032
Steven Rostedtad8d75f2009-04-14 19:39:12 -040033#include <trace/events/sched.h>
Steven Rostedt8aef2d22009-03-24 01:10:15 -040034
Steven Rostedt2af15d62009-05-28 13:37:24 -040035#include <asm/setup.h>
Abhishek Sagar395a59d2008-06-21 23:47:27 +053036
Steven Rostedt0706f1c2009-03-23 23:12:58 -040037#include "trace_output.h"
Steven Rostedtbac429f2009-03-20 12:50:56 -040038#include "trace_stat.h"
Steven Rostedt3d083392008-05-12 21:20:42 +020039
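/*
 * These macros evaluate their condition exactly once and return its
 * value, so they can be used directly inside a conditional, e.g.:
 *
 *	if (FTRACE_WARN_ON(ops == &global_ops))
 *		return -EINVAL;
 *
 * When the condition triggers, ftrace_kill() shuts ftrace down, since
 * a failed sanity check means the internal state can no longer be
 * trusted.
 */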
Steven Rostedt6912896e2008-10-23 09:33:03 -040040#define FTRACE_WARN_ON(cond) \
Steven Rostedt0778d9a2011-04-29 10:36:31 -040041 ({ \
42 int ___r = cond; \
43 if (WARN_ON(___r)) \
Steven Rostedt6912896e2008-10-23 09:33:03 -040044 ftrace_kill(); \
Steven Rostedt0778d9a2011-04-29 10:36:31 -040045 ___r; \
46 })
Steven Rostedt6912896e2008-10-23 09:33:03 -040047
48#define FTRACE_WARN_ON_ONCE(cond) \
Steven Rostedt0778d9a2011-04-29 10:36:31 -040049 ({ \
50 int ___r = cond; \
51 if (WARN_ON_ONCE(___r)) \
Steven Rostedt6912896e2008-10-23 09:33:03 -040052 ftrace_kill(); \
Steven Rostedt0778d9a2011-04-29 10:36:31 -040053 ___r; \
54 })
Steven Rostedt6912896e2008-10-23 09:33:03 -040055
Steven Rostedt8fc0c702009-02-16 15:28:00 -050056/* hash bits for specific function selection */
57#define FTRACE_HASH_BITS 7
58#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
Steven Rostedt33dc9b12011-05-02 17:34:47 -040059#define FTRACE_HASH_DEFAULT_BITS 10
60#define FTRACE_HASH_MAX_BITS 12
Steven Rostedt8fc0c702009-02-16 15:28:00 -050061
Steven Rostedt4eebcc82008-05-12 21:20:48 +020062/* ftrace_enabled is a method to turn ftrace on or off */
63int ftrace_enabled __read_mostly;
Steven Rostedtd61f82d2008-05-12 21:20:43 +020064static int last_ftrace_enabled;
Steven Rostedtb0fc4942008-05-12 21:20:43 +020065
Steven Rostedt60a7ecf2008-11-05 16:05:44 -050066/* Quick disabling of function tracer. */
67int function_trace_stop;
68
jolsa@redhat.com756d17e2009-10-13 16:33:52 -040069/* List for set_ftrace_pid's pids. */
70LIST_HEAD(ftrace_pids);
71struct ftrace_pid {
72 struct list_head list;
73 struct pid *pid;
74};
75
Steven Rostedt4eebcc82008-05-12 21:20:48 +020076/*
77 * ftrace_disabled is set when an anomaly is discovered.
78 * ftrace_disabled is much stronger than ftrace_enabled.
79 */
80static int ftrace_disabled __read_mostly;
81
Steven Rostedt52baf112009-02-14 01:15:39 -050082static DEFINE_MUTEX(ftrace_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +020083
Paul McQuadebd38c0e2011-05-31 20:51:55 +010084static struct ftrace_ops ftrace_list_end __read_mostly = {
Steven Rostedtfb9fb012009-03-25 13:26:41 -040085 .func = ftrace_stub,
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +020086};
87
Steven Rostedtb8489142011-05-04 09:27:52 -040088static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
89static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +020090ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
Steven Rostedt6331c282011-07-13 15:11:02 -040091static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
Steven Rostedt60a7ecf2008-11-05 16:05:44 -050092ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
Steven Rostedtdf4fc312008-11-26 00:16:23 -050093ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
Steven Rostedt2b499382011-05-03 22:49:52 -040094static struct ftrace_ops global_ops;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +020095
Steven Rostedtb8489142011-05-04 09:27:52 -040096static void
97ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
98
Paul E. McKenney3f379b02010-03-05 15:03:25 -080099/*
Steven Rostedtb8489142011-05-04 09:27:52 -0400100 * Traverse the ftrace_global_list, invoking all entries. The reason that we
Paul E. McKenney3f379b02010-03-05 15:03:25 -0800101 * can use rcu_dereference_raw() is that elements removed from this list
102 * are simply leaked, so there is no need to interact with a grace-period
103 * mechanism. The rcu_dereference_raw() calls are needed to handle
Steven Rostedtb8489142011-05-04 09:27:52 -0400104 * concurrent insertions into the ftrace_global_list.
Paul E. McKenney3f379b02010-03-05 15:03:25 -0800105 *
106 * Silly Alpha and silly pointer-speculation compiler optimizations!
107 */
Steven Rostedtb8489142011-05-04 09:27:52 -0400108static void ftrace_global_list_func(unsigned long ip,
109 unsigned long parent_ip)
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200110{
Steven Rostedtb1cff0a2011-05-25 14:27:43 -0400111 struct ftrace_ops *op;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200112
Steven Rostedtb1cff0a2011-05-25 14:27:43 -0400113 if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
114 return;
115
116 trace_recursion_set(TRACE_GLOBAL_BIT);
117 op = rcu_dereference_raw(ftrace_global_list); /*see above*/
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200118 while (op != &ftrace_list_end) {
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200119 op->func(ip, parent_ip);
Paul E. McKenney3f379b02010-03-05 15:03:25 -0800120 op = rcu_dereference_raw(op->next); /*see above*/
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200121 };
Steven Rostedtb1cff0a2011-05-25 14:27:43 -0400122 trace_recursion_clear(TRACE_GLOBAL_BIT);
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200123}
124
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500125static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
126{
Steven Rostedt0ef8cde2008-12-03 15:36:58 -0500127 if (!test_tsk_trace_trace(current))
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500128 return;
129
130 ftrace_pid_function(ip, parent_ip);
131}
132
133static void set_ftrace_pid_function(ftrace_func_t func)
134{
135 /* do not set ftrace_pid_function to itself! */
136 if (func != ftrace_pid_func)
137 ftrace_pid_function = func;
138}
139
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200140/**
Steven Rostedt3d083392008-05-12 21:20:42 +0200141 * clear_ftrace_function - reset the ftrace function
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200142 *
Steven Rostedt3d083392008-05-12 21:20:42 +0200143 * This NULLs the ftrace function and in essence stops
 144 * tracing.  There may be a short lag before other CPUs stop calling the old function.
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200145 */
Steven Rostedt3d083392008-05-12 21:20:42 +0200146void clear_ftrace_function(void)
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200147{
Steven Rostedt3d083392008-05-12 21:20:42 +0200148 ftrace_trace_function = ftrace_stub;
Steven Rostedt60a7ecf2008-11-05 16:05:44 -0500149 __ftrace_trace_function = ftrace_stub;
Steven Rostedt6331c282011-07-13 15:11:02 -0400150 __ftrace_trace_function_delay = ftrace_stub;
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500151 ftrace_pid_function = ftrace_stub;
Steven Rostedt3d083392008-05-12 21:20:42 +0200152}
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200153
Steven Rostedt60a7ecf2008-11-05 16:05:44 -0500154#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
155/*
 156 * For those archs that do not test function_trace_stop in their
157 * mcount call site, we need to do it from C.
158 */
159static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
160{
161 if (function_trace_stop)
162 return;
163
164 __ftrace_trace_function(ip, parent_ip);
165}
166#endif
167
Steven Rostedt2b499382011-05-03 22:49:52 -0400168static void update_global_ops(void)
Steven Rostedt491d0dc2011-04-27 21:43:36 -0400169{
170 ftrace_func_t func;
171
172 /*
173 * If there's only one function registered, then call that
174 * function directly. Otherwise, we need to iterate over the
175 * registered callers.
176 */
Steven Rostedtb8489142011-05-04 09:27:52 -0400177 if (ftrace_global_list == &ftrace_list_end ||
178 ftrace_global_list->next == &ftrace_list_end)
179 func = ftrace_global_list->func;
Steven Rostedt491d0dc2011-04-27 21:43:36 -0400180 else
Steven Rostedtb8489142011-05-04 09:27:52 -0400181 func = ftrace_global_list_func;
Steven Rostedt491d0dc2011-04-27 21:43:36 -0400182
183 /* If we filter on pids, update to use the pid function */
184 if (!list_empty(&ftrace_pids)) {
185 set_ftrace_pid_function(func);
186 func = ftrace_pid_func;
187 }
Steven Rostedt2b499382011-05-03 22:49:52 -0400188
189 global_ops.func = func;
190}
191
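/*
 * Pick the callback that the mcount trampoline will invoke: the single
 * registered ops->func directly when only one non-dynamic ops is on
 * ftrace_ops_list, otherwise ftrace_ops_list_func, which walks the
 * whole list.
 */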
192static void update_ftrace_function(void)
193{
194 ftrace_func_t func;
195
196 update_global_ops();
197
Steven Rostedtcdbe61b2011-05-05 21:14:55 -0400198 /*
199 * If we are at the end of the list and this ops is
200 * not dynamic, then have the mcount trampoline call
201 * the function directly
202 */
Steven Rostedtb8489142011-05-04 09:27:52 -0400203 if (ftrace_ops_list == &ftrace_list_end ||
Steven Rostedtcdbe61b2011-05-05 21:14:55 -0400204 (ftrace_ops_list->next == &ftrace_list_end &&
205 !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
Steven Rostedtb8489142011-05-04 09:27:52 -0400206 func = ftrace_ops_list->func;
207 else
208 func = ftrace_ops_list_func;
Steven Rostedt2b499382011-05-03 22:49:52 -0400209
Steven Rostedt491d0dc2011-04-27 21:43:36 -0400210#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
211 ftrace_trace_function = func;
212#else
Steven Rostedt6331c282011-07-13 15:11:02 -0400213#ifdef CONFIG_DYNAMIC_FTRACE
214 /* do not update till all functions have been modified */
215 __ftrace_trace_function_delay = func;
216#else
Steven Rostedt491d0dc2011-04-27 21:43:36 -0400217 __ftrace_trace_function = func;
Steven Rostedt6331c282011-07-13 15:11:02 -0400218#endif
Steven Rostedt491d0dc2011-04-27 21:43:36 -0400219 ftrace_trace_function = ftrace_test_stop_func;
220#endif
221}
222
Steven Rostedt2b499382011-05-03 22:49:52 -0400223static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
Steven Rostedt3d083392008-05-12 21:20:42 +0200224{
Steven Rostedt2b499382011-05-03 22:49:52 -0400225 ops->next = *list;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200226 /*
Steven Rostedtb8489142011-05-04 09:27:52 -0400227 * We are entering ops into the list but another
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200228 * CPU might be walking that list. We need to make sure
229 * the ops->next pointer is valid before another CPU sees
Steven Rostedtb8489142011-05-04 09:27:52 -0400230 * the ops pointer included into the list.
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200231 */
Steven Rostedt2b499382011-05-03 22:49:52 -0400232 rcu_assign_pointer(*list, ops);
233}
Steven Rostedt3d083392008-05-12 21:20:42 +0200234
Steven Rostedt2b499382011-05-03 22:49:52 -0400235static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
236{
237 struct ftrace_ops **p;
238
239 /*
240 * If we are removing the last function, then simply point
241 * to the ftrace_stub.
242 */
243 if (*list == ops && ops->next == &ftrace_list_end) {
244 *list = &ftrace_list_end;
245 return 0;
246 }
247
248 for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
249 if (*p == ops)
250 break;
251
252 if (*p != ops)
253 return -1;
254
255 *p = (*p)->next;
256 return 0;
257}
258
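/*
 * Link a new ftrace_ops into either the global list or the main
 * ftrace_ops_list.  Ops that do not live in core kernel data (such as
 * module allocations) are flagged FTRACE_OPS_FL_DYNAMIC so that
 * unregistering waits for in-flight callers before they can be freed.
 */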
259static int __register_ftrace_function(struct ftrace_ops *ops)
260{
261 if (ftrace_disabled)
262 return -ENODEV;
263
264 if (FTRACE_WARN_ON(ops == &global_ops))
265 return -EINVAL;
266
Steven Rostedtb8489142011-05-04 09:27:52 -0400267 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
268 return -EBUSY;
269
Steven Rostedtcdbe61b2011-05-05 21:14:55 -0400270 if (!core_kernel_data((unsigned long)ops))
271 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
272
Steven Rostedtb8489142011-05-04 09:27:52 -0400273 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
274 int first = ftrace_global_list == &ftrace_list_end;
275 add_ftrace_ops(&ftrace_global_list, ops);
276 ops->flags |= FTRACE_OPS_FL_ENABLED;
277 if (first)
278 add_ftrace_ops(&ftrace_ops_list, &global_ops);
279 } else
280 add_ftrace_ops(&ftrace_ops_list, ops);
281
Steven Rostedt491d0dc2011-04-27 21:43:36 -0400282 if (ftrace_enabled)
283 update_ftrace_function();
Steven Rostedt3d083392008-05-12 21:20:42 +0200284
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200285 return 0;
286}
287
Ingo Molnare309b412008-05-12 21:20:51 +0200288static int __unregister_ftrace_function(struct ftrace_ops *ops)
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200289{
Steven Rostedt2b499382011-05-03 22:49:52 -0400290 int ret;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200291
Steven Rostedt2b499382011-05-03 22:49:52 -0400292 if (ftrace_disabled)
293 return -ENODEV;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200294
Steven Rostedtb8489142011-05-04 09:27:52 -0400295 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
296 return -EBUSY;
297
Steven Rostedt2b499382011-05-03 22:49:52 -0400298 if (FTRACE_WARN_ON(ops == &global_ops))
299 return -EINVAL;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200300
Steven Rostedtb8489142011-05-04 09:27:52 -0400301 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
302 ret = remove_ftrace_ops(&ftrace_global_list, ops);
303 if (!ret && ftrace_global_list == &ftrace_list_end)
304 ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
305 if (!ret)
306 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
307 } else
308 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
309
Steven Rostedt2b499382011-05-03 22:49:52 -0400310 if (ret < 0)
311 return ret;
Steven Rostedtb8489142011-05-04 09:27:52 -0400312
Steven Rostedt491d0dc2011-04-27 21:43:36 -0400313 if (ftrace_enabled)
314 update_ftrace_function();
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200315
Steven Rostedtcdbe61b2011-05-05 21:14:55 -0400316 /*
317 * Dynamic ops may be freed, we must make sure that all
318 * callers are done before leaving this function.
319 */
320 if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
321 synchronize_sched();
322
Steven Rostedte6ea44e2009-02-14 01:42:44 -0500323 return 0;
Steven Rostedt3d083392008-05-12 21:20:42 +0200324}
325
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500326static void ftrace_update_pid_func(void)
327{
Steven Rostedt491d0dc2011-04-27 21:43:36 -0400328 /* Only do something if we are tracing something */
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500329 if (ftrace_trace_function == ftrace_stub)
KOSAKI Motohiro10dd3eb2009-03-06 15:29:04 +0900330 return;
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500331
Steven Rostedt491d0dc2011-04-27 21:43:36 -0400332 update_ftrace_function();
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500333}
334
Steven Rostedt493762f2009-03-23 17:12:36 -0400335#ifdef CONFIG_FUNCTION_PROFILER
336struct ftrace_profile {
337 struct hlist_node node;
338 unsigned long ip;
339 unsigned long counter;
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400340#ifdef CONFIG_FUNCTION_GRAPH_TRACER
341 unsigned long long time;
Chase Douglase330b3b2010-04-26 14:02:05 -0400342 unsigned long long time_squared;
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400343#endif
Steven Rostedt493762f2009-03-23 17:12:36 -0400344};
345
346struct ftrace_profile_page {
347 struct ftrace_profile_page *next;
348 unsigned long index;
349 struct ftrace_profile records[];
350};
351
Steven Rostedtcafb1682009-03-24 20:50:39 -0400352struct ftrace_profile_stat {
353 atomic_t disabled;
354 struct hlist_head *hash;
355 struct ftrace_profile_page *pages;
356 struct ftrace_profile_page *start;
357 struct tracer_stat stat;
358};
359
Steven Rostedt493762f2009-03-23 17:12:36 -0400360#define PROFILE_RECORDS_SIZE \
361 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
362
363#define PROFILES_PER_PAGE \
364 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
365
Steven Rostedtfb9fb012009-03-25 13:26:41 -0400366static int ftrace_profile_bits __read_mostly;
367static int ftrace_profile_enabled __read_mostly;
368
369/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
Steven Rostedt493762f2009-03-23 17:12:36 -0400370static DEFINE_MUTEX(ftrace_profile_lock);
371
Steven Rostedtcafb1682009-03-24 20:50:39 -0400372static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
Steven Rostedt493762f2009-03-23 17:12:36 -0400373
374#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
375
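/*
 * Iterator callbacks for the trace_stat framework: walk the profile
 * records page by page, starting from the first record of the per-cpu
 * stat structure.
 */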
Steven Rostedt493762f2009-03-23 17:12:36 -0400376static void *
377function_stat_next(void *v, int idx)
378{
379 struct ftrace_profile *rec = v;
380 struct ftrace_profile_page *pg;
381
382 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
383
384 again:
Li Zefan0296e422009-06-26 11:15:37 +0800385 if (idx != 0)
386 rec++;
387
Steven Rostedt493762f2009-03-23 17:12:36 -0400388 if ((void *)rec >= (void *)&pg->records[pg->index]) {
389 pg = pg->next;
390 if (!pg)
391 return NULL;
392 rec = &pg->records[0];
393 if (!rec->counter)
394 goto again;
395 }
396
397 return rec;
398}
399
400static void *function_stat_start(struct tracer_stat *trace)
401{
Steven Rostedtcafb1682009-03-24 20:50:39 -0400402 struct ftrace_profile_stat *stat =
403 container_of(trace, struct ftrace_profile_stat, stat);
404
405 if (!stat || !stat->start)
406 return NULL;
407
408 return function_stat_next(&stat->start->records[0], 0);
Steven Rostedt493762f2009-03-23 17:12:36 -0400409}
410
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400411#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 412/* with function graph, compare on total time */
413static int function_stat_cmp(void *p1, void *p2)
414{
415 struct ftrace_profile *a = p1;
416 struct ftrace_profile *b = p2;
417
418 if (a->time < b->time)
419 return -1;
420 if (a->time > b->time)
421 return 1;
422 else
423 return 0;
424}
425#else
 426/* without function graph, compare against hit counts */
Steven Rostedt493762f2009-03-23 17:12:36 -0400427static int function_stat_cmp(void *p1, void *p2)
428{
429 struct ftrace_profile *a = p1;
430 struct ftrace_profile *b = p2;
431
432 if (a->counter < b->counter)
433 return -1;
434 if (a->counter > b->counter)
435 return 1;
436 else
437 return 0;
438}
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400439#endif
Steven Rostedt493762f2009-03-23 17:12:36 -0400440
441static int function_stat_headers(struct seq_file *m)
442{
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400443#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Steven Rostedt34886c82009-03-25 21:00:47 -0400444 seq_printf(m, " Function "
Chase Douglase330b3b2010-04-26 14:02:05 -0400445 "Hit Time Avg s^2\n"
Steven Rostedt34886c82009-03-25 21:00:47 -0400446 " -------- "
Chase Douglase330b3b2010-04-26 14:02:05 -0400447 "--- ---- --- ---\n");
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400448#else
Steven Rostedt493762f2009-03-23 17:12:36 -0400449 seq_printf(m, " Function Hit\n"
450 " -------- ---\n");
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400451#endif
Steven Rostedt493762f2009-03-23 17:12:36 -0400452 return 0;
453}
454
455static int function_stat_show(struct seq_file *m, void *v)
456{
457 struct ftrace_profile *rec = v;
458 char str[KSYM_SYMBOL_LEN];
Li Zefan3aaba202010-08-23 16:50:12 +0800459 int ret = 0;
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400460#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Steven Rostedt34886c82009-03-25 21:00:47 -0400461 static struct trace_seq s;
462 unsigned long long avg;
Chase Douglase330b3b2010-04-26 14:02:05 -0400463 unsigned long long stddev;
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400464#endif
Li Zefan3aaba202010-08-23 16:50:12 +0800465 mutex_lock(&ftrace_profile_lock);
466
467 /* we raced with function_profile_reset() */
468 if (unlikely(rec->counter == 0)) {
469 ret = -EBUSY;
470 goto out;
471 }
Steven Rostedt493762f2009-03-23 17:12:36 -0400472
473 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400474 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
Steven Rostedt493762f2009-03-23 17:12:36 -0400475
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400476#ifdef CONFIG_FUNCTION_GRAPH_TRACER
477 seq_printf(m, " ");
Steven Rostedt34886c82009-03-25 21:00:47 -0400478 avg = rec->time;
479 do_div(avg, rec->counter);
480
Chase Douglase330b3b2010-04-26 14:02:05 -0400481 /* Sample variance (shown in the s^2 column) */
482 if (rec->counter <= 1)
483 stddev = 0;
484 else {
485 stddev = rec->time_squared - rec->counter * avg * avg;
486 /*
 487 * Divide by only 1000 for the ns^2 -> us^2 conversion;
 488 * trace_print_graph_duration will divide by 1000 again.
489 */
490 do_div(stddev, (rec->counter - 1) * 1000);
491 }
492
Steven Rostedt34886c82009-03-25 21:00:47 -0400493 trace_seq_init(&s);
494 trace_print_graph_duration(rec->time, &s);
495 trace_seq_puts(&s, " ");
496 trace_print_graph_duration(avg, &s);
Chase Douglase330b3b2010-04-26 14:02:05 -0400497 trace_seq_puts(&s, " ");
498 trace_print_graph_duration(stddev, &s);
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400499 trace_print_seq(m, &s);
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400500#endif
501 seq_putc(m, '\n');
Li Zefan3aaba202010-08-23 16:50:12 +0800502out:
503 mutex_unlock(&ftrace_profile_lock);
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400504
Li Zefan3aaba202010-08-23 16:50:12 +0800505 return ret;
Steven Rostedt493762f2009-03-23 17:12:36 -0400506}
507
Steven Rostedtcafb1682009-03-24 20:50:39 -0400508static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
Steven Rostedt493762f2009-03-23 17:12:36 -0400509{
510 struct ftrace_profile_page *pg;
511
Steven Rostedtcafb1682009-03-24 20:50:39 -0400512 pg = stat->pages = stat->start;
Steven Rostedt493762f2009-03-23 17:12:36 -0400513
514 while (pg) {
515 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
516 pg->index = 0;
517 pg = pg->next;
518 }
519
Steven Rostedtcafb1682009-03-24 20:50:39 -0400520 memset(stat->hash, 0,
Steven Rostedt493762f2009-03-23 17:12:36 -0400521 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
522}
523
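/*
 * Pre-allocate the pages that hold the ftrace_profile records, enough
 * for one record per function expected to be profiled (packed
 * PROFILES_PER_PAGE records to a page).
 */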
Steven Rostedtcafb1682009-03-24 20:50:39 -0400524int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
Steven Rostedt493762f2009-03-23 17:12:36 -0400525{
526 struct ftrace_profile_page *pg;
Steven Rostedt318e0a72009-03-25 20:06:34 -0400527 int functions;
528 int pages;
Steven Rostedt493762f2009-03-23 17:12:36 -0400529 int i;
530
531 /* If we already allocated, do nothing */
Steven Rostedtcafb1682009-03-24 20:50:39 -0400532 if (stat->pages)
Steven Rostedt493762f2009-03-23 17:12:36 -0400533 return 0;
534
Steven Rostedtcafb1682009-03-24 20:50:39 -0400535 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
536 if (!stat->pages)
Steven Rostedt493762f2009-03-23 17:12:36 -0400537 return -ENOMEM;
538
Steven Rostedt318e0a72009-03-25 20:06:34 -0400539#ifdef CONFIG_DYNAMIC_FTRACE
540 functions = ftrace_update_tot_cnt;
541#else
542 /*
543 * We do not know the number of functions that exist because
544 * dynamic tracing is what counts them. With past experience
545 * we have around 20K functions. That should be more than enough.
546 * It is highly unlikely we will execute every function in
547 * the kernel.
548 */
549 functions = 20000;
550#endif
551
Steven Rostedtcafb1682009-03-24 20:50:39 -0400552 pg = stat->start = stat->pages;
Steven Rostedt493762f2009-03-23 17:12:36 -0400553
Steven Rostedt318e0a72009-03-25 20:06:34 -0400554 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
555
556 for (i = 0; i < pages; i++) {
Steven Rostedt493762f2009-03-23 17:12:36 -0400557 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
Steven Rostedt493762f2009-03-23 17:12:36 -0400558 if (!pg->next)
Steven Rostedt318e0a72009-03-25 20:06:34 -0400559 goto out_free;
Steven Rostedt493762f2009-03-23 17:12:36 -0400560 pg = pg->next;
561 }
562
563 return 0;
Steven Rostedt318e0a72009-03-25 20:06:34 -0400564
565 out_free:
566 pg = stat->start;
567 while (pg) {
568 unsigned long tmp = (unsigned long)pg;
569
570 pg = pg->next;
571 free_page(tmp);
572 }
573
574 free_page((unsigned long)stat->pages);
575 stat->pages = NULL;
576 stat->start = NULL;
577
578 return -ENOMEM;
Steven Rostedt493762f2009-03-23 17:12:36 -0400579}
580
Steven Rostedtcafb1682009-03-24 20:50:39 -0400581static int ftrace_profile_init_cpu(int cpu)
Steven Rostedt493762f2009-03-23 17:12:36 -0400582{
Steven Rostedtcafb1682009-03-24 20:50:39 -0400583 struct ftrace_profile_stat *stat;
Steven Rostedt493762f2009-03-23 17:12:36 -0400584 int size;
585
Steven Rostedtcafb1682009-03-24 20:50:39 -0400586 stat = &per_cpu(ftrace_profile_stats, cpu);
587
588 if (stat->hash) {
Steven Rostedt493762f2009-03-23 17:12:36 -0400589 /* If the profile is already created, simply reset it */
Steven Rostedtcafb1682009-03-24 20:50:39 -0400590 ftrace_profile_reset(stat);
Steven Rostedt493762f2009-03-23 17:12:36 -0400591 return 0;
592 }
593
594 /*
595 * We are profiling all functions, but usually only a few thousand
596 * functions are hit. We'll make a hash of 1024 items.
597 */
598 size = FTRACE_PROFILE_HASH_SIZE;
599
Steven Rostedtcafb1682009-03-24 20:50:39 -0400600 stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
Steven Rostedt493762f2009-03-23 17:12:36 -0400601
Steven Rostedtcafb1682009-03-24 20:50:39 -0400602 if (!stat->hash)
Steven Rostedt493762f2009-03-23 17:12:36 -0400603 return -ENOMEM;
604
Steven Rostedtcafb1682009-03-24 20:50:39 -0400605 if (!ftrace_profile_bits) {
606 size--;
Steven Rostedt493762f2009-03-23 17:12:36 -0400607
Steven Rostedtcafb1682009-03-24 20:50:39 -0400608 for (; size; size >>= 1)
609 ftrace_profile_bits++;
610 }
Steven Rostedt493762f2009-03-23 17:12:36 -0400611
Steven Rostedt318e0a72009-03-25 20:06:34 -0400612 /* Preallocate the function profiling pages */
Steven Rostedtcafb1682009-03-24 20:50:39 -0400613 if (ftrace_profile_pages_init(stat) < 0) {
614 kfree(stat->hash);
615 stat->hash = NULL;
Steven Rostedt493762f2009-03-23 17:12:36 -0400616 return -ENOMEM;
617 }
618
619 return 0;
620}
621
Steven Rostedtcafb1682009-03-24 20:50:39 -0400622static int ftrace_profile_init(void)
623{
624 int cpu;
625 int ret = 0;
626
627 for_each_online_cpu(cpu) {
628 ret = ftrace_profile_init_cpu(cpu);
629 if (ret)
630 break;
631 }
632
633 return ret;
634}
635
Steven Rostedt493762f2009-03-23 17:12:36 -0400636/* interrupts must be disabled */
Steven Rostedtcafb1682009-03-24 20:50:39 -0400637static struct ftrace_profile *
638ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
Steven Rostedt493762f2009-03-23 17:12:36 -0400639{
640 struct ftrace_profile *rec;
641 struct hlist_head *hhd;
642 struct hlist_node *n;
643 unsigned long key;
644
645 key = hash_long(ip, ftrace_profile_bits);
Steven Rostedtcafb1682009-03-24 20:50:39 -0400646 hhd = &stat->hash[key];
Steven Rostedt493762f2009-03-23 17:12:36 -0400647
648 if (hlist_empty(hhd))
649 return NULL;
650
651 hlist_for_each_entry_rcu(rec, n, hhd, node) {
652 if (rec->ip == ip)
653 return rec;
654 }
655
656 return NULL;
657}
658
Steven Rostedtcafb1682009-03-24 20:50:39 -0400659static void ftrace_add_profile(struct ftrace_profile_stat *stat,
660 struct ftrace_profile *rec)
Steven Rostedt493762f2009-03-23 17:12:36 -0400661{
662 unsigned long key;
663
664 key = hash_long(rec->ip, ftrace_profile_bits);
Steven Rostedtcafb1682009-03-24 20:50:39 -0400665 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
Steven Rostedt493762f2009-03-23 17:12:36 -0400666}
667
Steven Rostedt318e0a72009-03-25 20:06:34 -0400668/*
 669 * The memory is already allocated; this simply finds a new record to use.
670 */
Steven Rostedt493762f2009-03-23 17:12:36 -0400671static struct ftrace_profile *
Steven Rostedt318e0a72009-03-25 20:06:34 -0400672ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
Steven Rostedt493762f2009-03-23 17:12:36 -0400673{
674 struct ftrace_profile *rec = NULL;
675
Steven Rostedt318e0a72009-03-25 20:06:34 -0400676 /* prevent recursion (from NMIs) */
Steven Rostedtcafb1682009-03-24 20:50:39 -0400677 if (atomic_inc_return(&stat->disabled) != 1)
Steven Rostedt493762f2009-03-23 17:12:36 -0400678 goto out;
679
Steven Rostedt493762f2009-03-23 17:12:36 -0400680 /*
Steven Rostedt318e0a72009-03-25 20:06:34 -0400681 * Try to find the function again since an NMI
682 * could have added it
Steven Rostedt493762f2009-03-23 17:12:36 -0400683 */
Steven Rostedtcafb1682009-03-24 20:50:39 -0400684 rec = ftrace_find_profiled_func(stat, ip);
Steven Rostedt493762f2009-03-23 17:12:36 -0400685 if (rec)
Steven Rostedtcafb1682009-03-24 20:50:39 -0400686 goto out;
Steven Rostedt493762f2009-03-23 17:12:36 -0400687
Steven Rostedtcafb1682009-03-24 20:50:39 -0400688 if (stat->pages->index == PROFILES_PER_PAGE) {
689 if (!stat->pages->next)
690 goto out;
691 stat->pages = stat->pages->next;
Steven Rostedt493762f2009-03-23 17:12:36 -0400692 }
693
Steven Rostedtcafb1682009-03-24 20:50:39 -0400694 rec = &stat->pages->records[stat->pages->index++];
Steven Rostedt493762f2009-03-23 17:12:36 -0400695 rec->ip = ip;
Steven Rostedtcafb1682009-03-24 20:50:39 -0400696 ftrace_add_profile(stat, rec);
Steven Rostedt493762f2009-03-23 17:12:36 -0400697
Steven Rostedt493762f2009-03-23 17:12:36 -0400698 out:
Steven Rostedtcafb1682009-03-24 20:50:39 -0400699 atomic_dec(&stat->disabled);
Steven Rostedt493762f2009-03-23 17:12:36 -0400700
701 return rec;
702}
703
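/*
 * Callback run on every traced function while profiling is enabled:
 * look up (or allocate) the per-cpu record for this ip and bump its
 * hit counter.  Interrupts are disabled locally while the per-cpu
 * state is touched.
 */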
Steven Rostedt493762f2009-03-23 17:12:36 -0400704static void
705function_profile_call(unsigned long ip, unsigned long parent_ip)
706{
Steven Rostedtcafb1682009-03-24 20:50:39 -0400707 struct ftrace_profile_stat *stat;
Steven Rostedt493762f2009-03-23 17:12:36 -0400708 struct ftrace_profile *rec;
709 unsigned long flags;
Steven Rostedt493762f2009-03-23 17:12:36 -0400710
711 if (!ftrace_profile_enabled)
712 return;
713
Steven Rostedt493762f2009-03-23 17:12:36 -0400714 local_irq_save(flags);
Steven Rostedtcafb1682009-03-24 20:50:39 -0400715
716 stat = &__get_cpu_var(ftrace_profile_stats);
Steven Rostedt0f6ce3d2009-06-01 21:51:28 -0400717 if (!stat->hash || !ftrace_profile_enabled)
Steven Rostedtcafb1682009-03-24 20:50:39 -0400718 goto out;
719
720 rec = ftrace_find_profiled_func(stat, ip);
Steven Rostedt493762f2009-03-23 17:12:36 -0400721 if (!rec) {
Steven Rostedt318e0a72009-03-25 20:06:34 -0400722 rec = ftrace_profile_alloc(stat, ip);
Steven Rostedt493762f2009-03-23 17:12:36 -0400723 if (!rec)
724 goto out;
725 }
726
727 rec->counter++;
728 out:
729 local_irq_restore(flags);
730}
731
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400732#ifdef CONFIG_FUNCTION_GRAPH_TRACER
733static int profile_graph_entry(struct ftrace_graph_ent *trace)
734{
735 function_profile_call(trace->func, 0);
736 return 1;
737}
738
739static void profile_graph_return(struct ftrace_graph_ret *trace)
740{
Steven Rostedtcafb1682009-03-24 20:50:39 -0400741 struct ftrace_profile_stat *stat;
Steven Rostedta2a16d62009-03-24 23:17:58 -0400742 unsigned long long calltime;
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400743 struct ftrace_profile *rec;
Steven Rostedtcafb1682009-03-24 20:50:39 -0400744 unsigned long flags;
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400745
746 local_irq_save(flags);
Steven Rostedtcafb1682009-03-24 20:50:39 -0400747 stat = &__get_cpu_var(ftrace_profile_stats);
Steven Rostedt0f6ce3d2009-06-01 21:51:28 -0400748 if (!stat->hash || !ftrace_profile_enabled)
Steven Rostedtcafb1682009-03-24 20:50:39 -0400749 goto out;
750
Steven Rostedt37e44bc2010-04-27 21:04:24 -0400751 /* If the calltime was zero'd ignore it */
752 if (!trace->calltime)
753 goto out;
754
Steven Rostedta2a16d62009-03-24 23:17:58 -0400755 calltime = trace->rettime - trace->calltime;
756
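 /*
  * Unless the graph-time option is set, subtract the time spent in
  * this function's children (tracked in subtime) so that only the
  * function's own execution time is accumulated below.
  */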
757 if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
758 int index;
759
760 index = trace->depth;
761
762 /* Append this call time to the parent time to subtract */
763 if (index)
764 current->ret_stack[index - 1].subtime += calltime;
765
766 if (current->ret_stack[index].subtime < calltime)
767 calltime -= current->ret_stack[index].subtime;
768 else
769 calltime = 0;
770 }
771
Steven Rostedtcafb1682009-03-24 20:50:39 -0400772 rec = ftrace_find_profiled_func(stat, trace->func);
Chase Douglase330b3b2010-04-26 14:02:05 -0400773 if (rec) {
Steven Rostedta2a16d62009-03-24 23:17:58 -0400774 rec->time += calltime;
Chase Douglase330b3b2010-04-26 14:02:05 -0400775 rec->time_squared += calltime * calltime;
776 }
Steven Rostedta2a16d62009-03-24 23:17:58 -0400777
Steven Rostedtcafb1682009-03-24 20:50:39 -0400778 out:
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400779 local_irq_restore(flags);
780}
781
782static int register_ftrace_profiler(void)
783{
784 return register_ftrace_graph(&profile_graph_return,
785 &profile_graph_entry);
786}
787
788static void unregister_ftrace_profiler(void)
789{
790 unregister_ftrace_graph();
791}
792#else
Paul McQuadebd38c0e2011-05-31 20:51:55 +0100793static struct ftrace_ops ftrace_profile_ops __read_mostly = {
Steven Rostedtfb9fb012009-03-25 13:26:41 -0400794 .func = function_profile_call,
Steven Rostedt493762f2009-03-23 17:12:36 -0400795};
796
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400797static int register_ftrace_profiler(void)
798{
799 return register_ftrace_function(&ftrace_profile_ops);
800}
801
802static void unregister_ftrace_profiler(void)
803{
804 unregister_ftrace_function(&ftrace_profile_ops);
805}
806#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
807
Steven Rostedt493762f2009-03-23 17:12:36 -0400808static ssize_t
809ftrace_profile_write(struct file *filp, const char __user *ubuf,
810 size_t cnt, loff_t *ppos)
811{
812 unsigned long val;
Steven Rostedt493762f2009-03-23 17:12:36 -0400813 int ret;
814
Peter Huewe22fe9b52011-06-07 21:58:27 +0200815 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
816 if (ret)
Steven Rostedt493762f2009-03-23 17:12:36 -0400817 return ret;
818
819 val = !!val;
820
821 mutex_lock(&ftrace_profile_lock);
822 if (ftrace_profile_enabled ^ val) {
823 if (val) {
824 ret = ftrace_profile_init();
825 if (ret < 0) {
826 cnt = ret;
827 goto out;
828 }
829
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400830 ret = register_ftrace_profiler();
831 if (ret < 0) {
832 cnt = ret;
833 goto out;
834 }
Steven Rostedt493762f2009-03-23 17:12:36 -0400835 ftrace_profile_enabled = 1;
836 } else {
837 ftrace_profile_enabled = 0;
Steven Rostedt0f6ce3d2009-06-01 21:51:28 -0400838 /*
839 * unregister_ftrace_profiler calls stop_machine
 840 * so this acts like a synchronize_sched().
841 */
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400842 unregister_ftrace_profiler();
Steven Rostedt493762f2009-03-23 17:12:36 -0400843 }
844 }
845 out:
846 mutex_unlock(&ftrace_profile_lock);
847
Jiri Olsacf8517c2009-10-23 19:36:16 -0400848 *ppos += cnt;
Steven Rostedt493762f2009-03-23 17:12:36 -0400849
850 return cnt;
851}
852
853static ssize_t
854ftrace_profile_read(struct file *filp, char __user *ubuf,
855 size_t cnt, loff_t *ppos)
856{
Steven Rostedtfb9fb012009-03-25 13:26:41 -0400857 char buf[64]; /* big enough to hold a number */
Steven Rostedt493762f2009-03-23 17:12:36 -0400858 int r;
859
860 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
861 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
862}
863
864static const struct file_operations ftrace_profile_fops = {
865 .open = tracing_open_generic,
866 .read = ftrace_profile_read,
867 .write = ftrace_profile_write,
Arnd Bergmann6038f372010-08-15 18:52:59 +0200868 .llseek = default_llseek,
Steven Rostedt493762f2009-03-23 17:12:36 -0400869};
870
Steven Rostedtcafb1682009-03-24 20:50:39 -0400871/* used to initialize the real stat files */
872static struct tracer_stat function_stats __initdata = {
Steven Rostedtfb9fb012009-03-25 13:26:41 -0400873 .name = "functions",
874 .stat_start = function_stat_start,
875 .stat_next = function_stat_next,
876 .stat_cmp = function_stat_cmp,
877 .stat_headers = function_stat_headers,
878 .stat_show = function_stat_show
Steven Rostedtcafb1682009-03-24 20:50:39 -0400879};
880
Steven Rostedt6ab5d662009-06-04 00:55:45 -0400881static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
Steven Rostedt493762f2009-03-23 17:12:36 -0400882{
Steven Rostedtcafb1682009-03-24 20:50:39 -0400883 struct ftrace_profile_stat *stat;
Steven Rostedt493762f2009-03-23 17:12:36 -0400884 struct dentry *entry;
Steven Rostedtcafb1682009-03-24 20:50:39 -0400885 char *name;
Steven Rostedt493762f2009-03-23 17:12:36 -0400886 int ret;
Steven Rostedtcafb1682009-03-24 20:50:39 -0400887 int cpu;
Steven Rostedt493762f2009-03-23 17:12:36 -0400888
Steven Rostedtcafb1682009-03-24 20:50:39 -0400889 for_each_possible_cpu(cpu) {
890 stat = &per_cpu(ftrace_profile_stats, cpu);
891
892 /* allocate enough for function name + cpu number */
893 name = kmalloc(32, GFP_KERNEL);
894 if (!name) {
895 /*
 896 * The files created are permanent; even if something fails
 897 * later, we still do not free this memory.
898 */
Steven Rostedtcafb1682009-03-24 20:50:39 -0400899 WARN(1,
900 "Could not allocate stat file for cpu %d\n",
901 cpu);
902 return;
903 }
904 stat->stat = function_stats;
905 snprintf(name, 32, "function%d", cpu);
906 stat->stat.name = name;
907 ret = register_stat_tracer(&stat->stat);
908 if (ret) {
909 WARN(1,
910 "Could not register function stat for cpu %d\n",
911 cpu);
912 kfree(name);
913 return;
914 }
Steven Rostedt493762f2009-03-23 17:12:36 -0400915 }
916
917 entry = debugfs_create_file("function_profile_enabled", 0644,
918 d_tracer, NULL, &ftrace_profile_fops);
919 if (!entry)
920 pr_warning("Could not create debugfs "
921 "'function_profile_enabled' entry\n");
922}
923
924#else /* CONFIG_FUNCTION_PROFILER */
Steven Rostedt6ab5d662009-06-04 00:55:45 -0400925static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
Steven Rostedt493762f2009-03-23 17:12:36 -0400926{
927}
928#endif /* CONFIG_FUNCTION_PROFILER */
929
Ingo Molnar73d3fd92009-02-17 11:48:18 +0100930static struct pid * const ftrace_swapper_pid = &init_struct_pid;
931
Steven Rostedt3d083392008-05-12 21:20:42 +0200932#ifdef CONFIG_DYNAMIC_FTRACE
Ingo Molnar73d3fd92009-02-17 11:48:18 +0100933
Steven Rostedt99ecdc42008-08-15 21:40:05 -0400934#ifndef CONFIG_FTRACE_MCOUNT_RECORD
Steven Rostedtcb7be3b2008-10-23 09:33:05 -0400935# error Dynamic ftrace depends on MCOUNT_RECORD
Steven Rostedt99ecdc42008-08-15 21:40:05 -0400936#endif
937
Steven Rostedt8fc0c702009-02-16 15:28:00 -0500938static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
939
Steven Rostedtb6887d72009-02-17 12:32:04 -0500940struct ftrace_func_probe {
Steven Rostedt8fc0c702009-02-16 15:28:00 -0500941 struct hlist_node node;
Steven Rostedtb6887d72009-02-17 12:32:04 -0500942 struct ftrace_probe_ops *ops;
Steven Rostedt8fc0c702009-02-16 15:28:00 -0500943 unsigned long flags;
944 unsigned long ip;
945 void *data;
946 struct rcu_head rcu;
947};
948
Steven Rostedtd61f82d2008-05-12 21:20:43 +0200949enum {
950 FTRACE_ENABLE_CALLS = (1 << 0),
951 FTRACE_DISABLE_CALLS = (1 << 1),
952 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
Steven Rostedt79e406d2010-09-14 22:19:46 -0400953 FTRACE_START_FUNC_RET = (1 << 3),
954 FTRACE_STOP_FUNC_RET = (1 << 4),
Steven Rostedtd61f82d2008-05-12 21:20:43 +0200955};
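/*
 * Each ftrace_ops carries two hashes of function addresses: a
 * filter_hash of functions it wants to trace and a notrace_hash of
 * functions it must skip.  ftrace_func_entry is one entry (a single
 * instruction pointer) stored in such a hash.
 */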
Steven Rostedtb448c4e2011-04-29 15:12:32 -0400956struct ftrace_func_entry {
957 struct hlist_node hlist;
958 unsigned long ip;
959};
960
961struct ftrace_hash {
962 unsigned long size_bits;
963 struct hlist_head *buckets;
964 unsigned long count;
Steven Rostedt07fd5512011-05-05 18:03:47 -0400965 struct rcu_head rcu;
Steven Rostedtb448c4e2011-04-29 15:12:32 -0400966};
967
Steven Rostedt33dc9b12011-05-02 17:34:47 -0400968/*
969 * We make these constant because no one should touch them,
970 * but they are used as the default "empty hash", to avoid allocating
971 * it all the time. These are in a read only section such that if
972 * anyone does try to modify it, it will cause an exception.
973 */
974static const struct hlist_head empty_buckets[1];
975static const struct ftrace_hash empty_hash = {
976 .buckets = (struct hlist_head *)empty_buckets,
Steven Rostedtb448c4e2011-04-29 15:12:32 -0400977};
Steven Rostedt33dc9b12011-05-02 17:34:47 -0400978#define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
Steven Rostedt5072c592008-05-12 21:20:43 +0200979
Steven Rostedt2b499382011-05-03 22:49:52 -0400980static struct ftrace_ops global_ops = {
Steven Rostedtf45948e2011-05-02 12:29:25 -0400981 .func = ftrace_stub,
Steven Rostedt33dc9b12011-05-02 17:34:47 -0400982 .notrace_hash = EMPTY_HASH,
983 .filter_hash = EMPTY_HASH,
Steven Rostedtf45948e2011-05-02 12:29:25 -0400984};
985
Lai Jiangshane94142a2009-03-13 17:51:27 +0800986static struct dyn_ftrace *ftrace_new_addrs;
Steven Rostedt3d083392008-05-12 21:20:42 +0200987
Steven Rostedt41c52c02008-05-22 11:46:33 -0400988static DEFINE_MUTEX(ftrace_regex_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +0200989
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200990struct ftrace_page {
991 struct ftrace_page *next;
Steven Rostedt431aa3f2009-01-06 12:43:01 -0500992 int index;
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200993 struct dyn_ftrace records[];
David Milleraa5e5ce2008-05-13 22:06:56 -0700994};
Steven Rostedt3c1720f2008-05-12 21:20:43 +0200995
996#define ENTRIES_PER_PAGE \
997 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
998
999/* estimate from running different kernels */
1000#define NR_TO_INIT 10000
1001
1002static struct ftrace_page *ftrace_pages_start;
1003static struct ftrace_page *ftrace_pages;
1004
Steven Rostedt37ad50842008-05-12 21:20:48 +02001005static struct dyn_ftrace *ftrace_free_records;
1006
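/*
 * Look up an instruction pointer in an ftrace_hash; returns the entry
 * or NULL.  When used on a live ops hash the caller must keep
 * preemption disabled, since replaced hashes are freed with
 * call_rcu_sched().
 */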
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001007static struct ftrace_func_entry *
1008ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1009{
1010 unsigned long key;
1011 struct ftrace_func_entry *entry;
1012 struct hlist_head *hhd;
1013 struct hlist_node *n;
1014
1015 if (!hash->count)
1016 return NULL;
1017
1018 if (hash->size_bits > 0)
1019 key = hash_long(ip, hash->size_bits);
1020 else
1021 key = 0;
1022
1023 hhd = &hash->buckets[key];
1024
1025 hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
1026 if (entry->ip == ip)
1027 return entry;
1028 }
1029 return NULL;
1030}
1031
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001032static void __add_hash_entry(struct ftrace_hash *hash,
1033 struct ftrace_func_entry *entry)
1034{
1035 struct hlist_head *hhd;
1036 unsigned long key;
1037
1038 if (hash->size_bits)
1039 key = hash_long(entry->ip, hash->size_bits);
1040 else
1041 key = 0;
1042
1043 hhd = &hash->buckets[key];
1044 hlist_add_head(&entry->hlist, hhd);
1045 hash->count++;
1046}
1047
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001048static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1049{
1050 struct ftrace_func_entry *entry;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001051
1052 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1053 if (!entry)
1054 return -ENOMEM;
1055
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001056 entry->ip = ip;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001057 __add_hash_entry(hash, entry);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001058
1059 return 0;
1060}
1061
1062static void
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001063free_hash_entry(struct ftrace_hash *hash,
1064 struct ftrace_func_entry *entry)
1065{
1066 hlist_del(&entry->hlist);
1067 kfree(entry);
1068 hash->count--;
1069}
1070
1071static void
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001072remove_hash_entry(struct ftrace_hash *hash,
1073 struct ftrace_func_entry *entry)
1074{
1075 hlist_del(&entry->hlist);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001076 hash->count--;
1077}
1078
1079static void ftrace_hash_clear(struct ftrace_hash *hash)
1080{
1081 struct hlist_head *hhd;
1082 struct hlist_node *tp, *tn;
1083 struct ftrace_func_entry *entry;
1084 int size = 1 << hash->size_bits;
1085 int i;
1086
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001087 if (!hash->count)
1088 return;
1089
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001090 for (i = 0; i < size; i++) {
1091 hhd = &hash->buckets[i];
1092 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001093 free_hash_entry(hash, entry);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04001094 }
1095 FTRACE_WARN_ON(hash->count);
1096}
1097
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001098static void free_ftrace_hash(struct ftrace_hash *hash)
1099{
1100 if (!hash || hash == EMPTY_HASH)
1101 return;
1102 ftrace_hash_clear(hash);
1103 kfree(hash->buckets);
1104 kfree(hash);
1105}
1106
Steven Rostedt07fd5512011-05-05 18:03:47 -04001107static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1108{
1109 struct ftrace_hash *hash;
1110
1111 hash = container_of(rcu, struct ftrace_hash, rcu);
1112 free_ftrace_hash(hash);
1113}
1114
1115static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1116{
1117 if (!hash || hash == EMPTY_HASH)
1118 return;
1119 call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1120}
1121
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001122static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1123{
1124 struct ftrace_hash *hash;
1125 int size;
1126
1127 hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1128 if (!hash)
1129 return NULL;
1130
1131 size = 1 << size_bits;
1132 hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
1133
1134 if (!hash->buckets) {
1135 kfree(hash);
1136 return NULL;
1137 }
1138
1139 hash->size_bits = size_bits;
1140
1141 return hash;
1142}
1143
1144static struct ftrace_hash *
1145alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1146{
1147 struct ftrace_func_entry *entry;
1148 struct ftrace_hash *new_hash;
1149 struct hlist_node *tp;
1150 int size;
1151 int ret;
1152 int i;
1153
1154 new_hash = alloc_ftrace_hash(size_bits);
1155 if (!new_hash)
1156 return NULL;
1157
1158 /* Empty hash? */
1159 if (!hash || !hash->count)
1160 return new_hash;
1161
1162 size = 1 << hash->size_bits;
1163 for (i = 0; i < size; i++) {
1164 hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
1165 ret = add_hash_entry(new_hash, entry->ip);
1166 if (ret < 0)
1167 goto free_hash;
1168 }
1169 }
1170
1171 FTRACE_WARN_ON(new_hash->count != hash->count);
1172
1173 return new_hash;
1174
1175 free_hash:
1176 free_ftrace_hash(new_hash);
1177 return NULL;
1178}
1179
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001180static void
1181ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1182static void
1183ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1184
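/*
 * Replace *dst with a hash rebuilt from src: record accounting for the
 * old hash is dropped first, the new hash is published with
 * rcu_assign_pointer(), and accounting is then re-enabled so the
 * dyn_ftrace records reflect the new filter/notrace state.  On
 * allocation failure the original hash stays in place.
 */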
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001185static int
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001186ftrace_hash_move(struct ftrace_ops *ops, int enable,
1187 struct ftrace_hash **dst, struct ftrace_hash *src)
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001188{
1189 struct ftrace_func_entry *entry;
1190 struct hlist_node *tp, *tn;
1191 struct hlist_head *hhd;
Steven Rostedt07fd5512011-05-05 18:03:47 -04001192 struct ftrace_hash *old_hash;
1193 struct ftrace_hash *new_hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001194 unsigned long key;
1195 int size = src->count;
1196 int bits = 0;
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001197 int ret;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001198 int i;
1199
1200 /*
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001201 * Remove the current set, update the hash and add
1202 * them back.
1203 */
1204 ftrace_hash_rec_disable(ops, enable);
1205
1206 /*
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001207 * If the new source is empty, just free dst and assign it
1208 * the empty_hash.
1209 */
1210 if (!src->count) {
Steven Rostedt07fd5512011-05-05 18:03:47 -04001211 free_ftrace_hash_rcu(*dst);
1212 rcu_assign_pointer(*dst, EMPTY_HASH);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001213 return 0;
1214 }
1215
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001216 /*
1217 * Make the hash size about 1/2 the # found
1218 */
1219 for (size /= 2; size; size >>= 1)
1220 bits++;
1221
1222 /* Don't allocate too much */
1223 if (bits > FTRACE_HASH_MAX_BITS)
1224 bits = FTRACE_HASH_MAX_BITS;
1225
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001226 ret = -ENOMEM;
Steven Rostedt07fd5512011-05-05 18:03:47 -04001227 new_hash = alloc_ftrace_hash(bits);
1228 if (!new_hash)
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001229 goto out;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001230
1231 size = 1 << src->size_bits;
1232 for (i = 0; i < size; i++) {
1233 hhd = &src->buckets[i];
1234 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
1235 if (bits > 0)
1236 key = hash_long(entry->ip, bits);
1237 else
1238 key = 0;
1239 remove_hash_entry(src, entry);
Steven Rostedt07fd5512011-05-05 18:03:47 -04001240 __add_hash_entry(new_hash, entry);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001241 }
1242 }
1243
Steven Rostedt07fd5512011-05-05 18:03:47 -04001244 old_hash = *dst;
1245 rcu_assign_pointer(*dst, new_hash);
1246 free_ftrace_hash_rcu(old_hash);
1247
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001248 ret = 0;
1249 out:
1250 /*
1251 * Enable regardless of ret:
1252 * On success, we enable the new hash.
1253 * On failure, we re-enable the original hash.
1254 */
1255 ftrace_hash_rec_enable(ops, enable);
1256
1257 return ret;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001258}
1259
Steven Rostedt265c8312009-02-13 12:43:56 -05001260/*
Steven Rostedtb8489142011-05-04 09:27:52 -04001261 * Test the hashes for this ops to see if we want to call
1262 * the ops->func or not.
1263 *
1264 * It's a match if the ip is in the ops->filter_hash or
1265 * the filter_hash does not exist or is empty,
1266 * AND
1267 * the ip is not in the ops->notrace_hash.
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04001268 *
1269 * This needs to be called with preemption disabled as
1270 * the hashes are freed with call_rcu_sched().
Steven Rostedtb8489142011-05-04 09:27:52 -04001271 */
1272static int
1273ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1274{
1275 struct ftrace_hash *filter_hash;
1276 struct ftrace_hash *notrace_hash;
1277 int ret;
1278
Steven Rostedtb8489142011-05-04 09:27:52 -04001279 filter_hash = rcu_dereference_raw(ops->filter_hash);
1280 notrace_hash = rcu_dereference_raw(ops->notrace_hash);
1281
1282 if ((!filter_hash || !filter_hash->count ||
1283 ftrace_lookup_ip(filter_hash, ip)) &&
1284 (!notrace_hash || !notrace_hash->count ||
1285 !ftrace_lookup_ip(notrace_hash, ip)))
1286 ret = 1;
1287 else
1288 ret = 0;
Steven Rostedtb8489142011-05-04 09:27:52 -04001289
1290 return ret;
1291}
1292
1293/*
Steven Rostedt265c8312009-02-13 12:43:56 -05001294 * This is a double for. Do not use 'break' to break out of the loop,
1295 * you must use a goto.
1296 */
1297#define do_for_each_ftrace_rec(pg, rec) \
1298 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1299 int _____i; \
1300 for (_____i = 0; _____i < pg->index; _____i++) { \
1301 rec = &pg->records[_____i];
1302
1303#define while_for_each_ftrace_rec() \
1304 } \
1305 }
Abhishek Sagarecea6562008-06-21 23:47:53 +05301306
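/*
 * Walk every dyn_ftrace record and increment or decrement its ref
 * count (kept in rec->flags) according to whether this ops' filter or
 * notrace hash now covers it.  That count is what decides whether the
 * call site should be patched to call into the tracer.
 */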
Steven Rostedted926f92011-05-03 13:25:24 -04001307static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1308 int filter_hash,
1309 bool inc)
1310{
1311 struct ftrace_hash *hash;
1312 struct ftrace_hash *other_hash;
1313 struct ftrace_page *pg;
1314 struct dyn_ftrace *rec;
1315 int count = 0;
1316 int all = 0;
1317
1318 /* Only update if the ops has been registered */
1319 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1320 return;
1321
1322 /*
1323 * In the filter_hash case:
1324 * If the count is zero, we update all records.
1325 * Otherwise we just update the items in the hash.
1326 *
1327 * In the notrace_hash case:
1328 * We enable the update in the hash.
1329 * As disabling notrace means enabling the tracing,
1330 * and enabling notrace means disabling, the inc variable
1331 * gets inversed.
1332 */
1333 if (filter_hash) {
1334 hash = ops->filter_hash;
1335 other_hash = ops->notrace_hash;
Steven Rostedtb8489142011-05-04 09:27:52 -04001336 if (!hash || !hash->count)
Steven Rostedted926f92011-05-03 13:25:24 -04001337 all = 1;
1338 } else {
1339 inc = !inc;
1340 hash = ops->notrace_hash;
1341 other_hash = ops->filter_hash;
1342 /*
1343 * If the notrace hash has no items,
1344 * then there's nothing to do.
1345 */
Steven Rostedtb8489142011-05-04 09:27:52 -04001346 if (hash && !hash->count)
Steven Rostedted926f92011-05-03 13:25:24 -04001347 return;
1348 }
1349
1350 do_for_each_ftrace_rec(pg, rec) {
1351 int in_other_hash = 0;
1352 int in_hash = 0;
1353 int match = 0;
1354
1355 if (all) {
1356 /*
1357 * Only the filter_hash affects all records.
1358 * Update if the record is not in the notrace hash.
1359 */
Steven Rostedtb8489142011-05-04 09:27:52 -04001360 if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
Steven Rostedted926f92011-05-03 13:25:24 -04001361 match = 1;
1362 } else {
Steven Rostedtb8489142011-05-04 09:27:52 -04001363 in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip);
1364 in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip);
Steven Rostedted926f92011-05-03 13:25:24 -04001365
 1366 /*
 * If filter_hash is set, match records that are in this hash
 * but not in the other (notrace) hash.  Otherwise match records
 * in the notrace hash that the other (filter) hash also covers,
 * or any notrace-hash record when the filter hash is empty.
 1368 */
1369 if (filter_hash && in_hash && !in_other_hash)
1370 match = 1;
1371 else if (!filter_hash && in_hash &&
1372 (in_other_hash || !other_hash->count))
1373 match = 1;
1374 }
1375 if (!match)
1376 continue;
1377
1378 if (inc) {
1379 rec->flags++;
1380 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1381 return;
1382 } else {
1383 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1384 return;
1385 rec->flags--;
1386 }
1387 count++;
1388 /* Shortcut, if we handled all records, we are done. */
1389 if (!all && count == hash->count)
1390 return;
1391 } while_for_each_ftrace_rec();
1392}
1393
1394static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1395 int filter_hash)
1396{
1397 __ftrace_hash_rec_update(ops, filter_hash, 0);
1398}
1399
1400static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1401 int filter_hash)
1402{
1403 __ftrace_hash_rec_update(ops, filter_hash, 1);
1404}
1405
Ingo Molnare309b412008-05-12 21:20:51 +02001406static void ftrace_free_rec(struct dyn_ftrace *rec)
Steven Rostedt37ad50842008-05-12 21:20:48 +02001407{
Lai Jiangshanee000b72009-03-24 13:38:06 +08001408 rec->freelist = ftrace_free_records;
Steven Rostedt37ad50842008-05-12 21:20:48 +02001409 ftrace_free_records = rec;
1410 rec->flags |= FTRACE_FL_FREE;
1411}
1412
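/*
 * Hand out the next free dyn_ftrace record: reuse an entry from the
 * free list if one exists, otherwise take the next slot in the current
 * ftrace_pages page, allocating a fresh page when it is full.
 */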
Ingo Molnare309b412008-05-12 21:20:51 +02001413static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001414{
Steven Rostedt37ad50842008-05-12 21:20:48 +02001415 struct dyn_ftrace *rec;
1416
1417 /* First check for freed records */
1418 if (ftrace_free_records) {
1419 rec = ftrace_free_records;
1420
Steven Rostedt37ad50842008-05-12 21:20:48 +02001421 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
Steven Rostedt6912896e2008-10-23 09:33:03 -04001422 FTRACE_WARN_ON_ONCE(1);
Steven Rostedt37ad50842008-05-12 21:20:48 +02001423 ftrace_free_records = NULL;
1424 return NULL;
1425 }
1426
Lai Jiangshanee000b72009-03-24 13:38:06 +08001427 ftrace_free_records = rec->freelist;
Steven Rostedt37ad50842008-05-12 21:20:48 +02001428 memset(rec, 0, sizeof(*rec));
1429 return rec;
1430 }
1431
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001432 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001433 if (!ftrace_pages->next) {
1434 /* allocate another page */
1435 ftrace_pages->next =
1436 (void *)get_zeroed_page(GFP_KERNEL);
1437 if (!ftrace_pages->next)
1438 return NULL;
1439 }
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001440 ftrace_pages = ftrace_pages->next;
1441 }
1442
1443 return &ftrace_pages->records[ftrace_pages->index++];
1444}
1445
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001446static struct dyn_ftrace *
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001447ftrace_record_ip(unsigned long ip)
Steven Rostedt3d083392008-05-12 21:20:42 +02001448{
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001449 struct dyn_ftrace *rec;
Steven Rostedt3d083392008-05-12 21:20:42 +02001450
Steven Rostedtf3c7ac42008-11-14 16:21:19 -08001451 if (ftrace_disabled)
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001452 return NULL;
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001453
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001454 rec = ftrace_alloc_dyn_node(ip);
1455 if (!rec)
1456 return NULL;
Steven Rostedt3d083392008-05-12 21:20:42 +02001457
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001458 rec->ip = ip;
Lai Jiangshanee000b72009-03-24 13:38:06 +08001459 rec->newlist = ftrace_new_addrs;
Lai Jiangshane94142a2009-03-13 17:51:27 +08001460 ftrace_new_addrs = rec;
Steven Rostedt3d083392008-05-12 21:20:42 +02001461
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001462 return rec;
Steven Rostedt3d083392008-05-12 21:20:42 +02001463}
1464
Steven Rostedt05736a42008-09-22 14:55:47 -07001465static void print_ip_ins(const char *fmt, unsigned char *p)
1466{
1467 int i;
1468
1469 printk(KERN_CONT "%s", fmt);
1470
1471 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1472 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1473}
1474
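/*
 * Report a failed code modification. The error code comes from the
 * arch's ftrace_make_call()/ftrace_make_nop():
 *   -EFAULT: ftrace faulted while reading/modifying the location
 *   -EINVAL: the instruction bytes found did not match what was
 *            expected (the actual bytes are dumped)
 *   -EPERM : ftrace faulted while writing the new instruction
 */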
Steven Rostedt31e88902008-11-14 16:21:19 -08001475static void ftrace_bug(int failed, unsigned long ip)
Steven Rostedtb17e8a32008-11-14 16:21:19 -08001476{
1477 switch (failed) {
1478 case -EFAULT:
1479 FTRACE_WARN_ON_ONCE(1);
1480 pr_info("ftrace faulted on modifying ");
1481 print_ip_sym(ip);
1482 break;
1483 case -EINVAL:
1484 FTRACE_WARN_ON_ONCE(1);
1485 pr_info("ftrace failed to modify ");
1486 print_ip_sym(ip);
Steven Rostedtb17e8a32008-11-14 16:21:19 -08001487 print_ip_ins(" actual: ", (unsigned char *)ip);
Steven Rostedtb17e8a32008-11-14 16:21:19 -08001488 printk(KERN_CONT "\n");
1489 break;
1490 case -EPERM:
1491 FTRACE_WARN_ON_ONCE(1);
1492 pr_info("ftrace faulted on writing ");
1493 print_ip_sym(ip);
1494 break;
1495 default:
1496 FTRACE_WARN_ON_ONCE(1);
1497 pr_info("ftrace faulted on unknown error ");
1498 print_ip_sym(ip);
1499 }
1500}
1501
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001502
Masami Hiramatsu2cfa1972010-02-02 16:49:11 -05001503/* Return 1 if the address range is reserved for ftrace */
1504int ftrace_text_reserved(void *start, void *end)
1505{
1506 struct dyn_ftrace *rec;
1507 struct ftrace_page *pg;
1508
1509 do_for_each_ftrace_rec(pg, rec) {
1510 if (rec->ip <= (unsigned long)end &&
1511 rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1512 return 1;
1513 } while_for_each_ftrace_rec();
1514 return 0;
1515}
1516
1517
Abhishek Sagar492a7ea52008-05-25 00:10:04 +05301518static int
Steven Rostedt31e88902008-11-14 16:21:19 -08001519__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001520{
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01001521 unsigned long ftrace_addr;
Xiao Guangrong64fbcd12009-07-15 12:32:15 +08001522 unsigned long flag = 0UL;
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01001523
Shaohua Lif0001202009-01-09 11:29:42 +08001524 ftrace_addr = (unsigned long)FTRACE_ADDR;
Steven Rostedt5072c592008-05-12 21:20:43 +02001525
Steven Rostedt982c3502008-11-15 16:31:41 -05001526 /*
Steven Rostedted926f92011-05-03 13:25:24 -04001527 * If we are enabling tracing:
Steven Rostedt982c3502008-11-15 16:31:41 -05001528 *
Steven Rostedted926f92011-05-03 13:25:24 -04001529 * If the record has a ref count, then we need to enable it
1530 * because someone is using it.
Steven Rostedt982c3502008-11-15 16:31:41 -05001531 *
Steven Rostedted926f92011-05-03 13:25:24 -04001532 * Otherwise we make sure it's disabled.
1533 *
1534 * If we are disabling tracing, then disable all records that
1535 * are enabled.
Steven Rostedt982c3502008-11-15 16:31:41 -05001536 */
Steven Rostedted926f92011-05-03 13:25:24 -04001537 if (enable && (rec->flags & ~FTRACE_FL_MASK))
1538 flag = FTRACE_FL_ENABLED;
Steven Rostedt5072c592008-05-12 21:20:43 +02001539
Xiao Guangrong64fbcd12009-07-15 12:32:15 +08001540 /* If the state of this record hasn't changed, then do nothing */
1541 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1542 return 0;
1543
1544 if (flag) {
1545 rec->flags |= FTRACE_FL_ENABLED;
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01001546 return ftrace_make_call(rec, ftrace_addr);
Xiao Guangrong64fbcd12009-07-15 12:32:15 +08001547 }
1548
1549 rec->flags &= ~FTRACE_FL_ENABLED;
1550 return ftrace_make_nop(NULL, rec, ftrace_addr);
Steven Rostedt5072c592008-05-12 21:20:43 +02001551}
1552
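/*
 * Walk every dyn_ftrace record (skipping freed ones) and update its
 * call site to match @enable. Processing stops on the first failure,
 * which is reported through ftrace_bug().
 */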
1553static void ftrace_replace_code(int enable)
1554{
Steven Rostedt37ad50842008-05-12 21:20:48 +02001555 struct dyn_ftrace *rec;
1556 struct ftrace_page *pg;
Steven Rostedt6a24a242009-02-17 11:20:26 -05001557 int failed;
Steven Rostedt37ad50842008-05-12 21:20:48 +02001558
Steven Rostedt45a4a232011-04-21 23:16:46 -04001559 if (unlikely(ftrace_disabled))
1560 return;
1561
Steven Rostedt265c8312009-02-13 12:43:56 -05001562 do_for_each_ftrace_rec(pg, rec) {
Steven Rostedtd2c8c3e2011-04-25 14:32:42 -04001563 /* Skip over free records */
1564 if (rec->flags & FTRACE_FL_FREE)
Steven Rostedt265c8312009-02-13 12:43:56 -05001565 continue;
Steven Rostedt5072c592008-05-12 21:20:43 +02001566
Steven Rostedt265c8312009-02-13 12:43:56 -05001567 failed = __ftrace_replace_code(rec, enable);
Zhaoleifa9d13c2009-03-13 17:16:34 +08001568 if (failed) {
Steven Rostedt3279ba32009-10-07 16:57:56 -04001569 ftrace_bug(failed, rec->ip);
1570 /* Stop processing */
1571 return;
Steven Rostedt265c8312009-02-13 12:43:56 -05001572 }
1573 } while_for_each_ftrace_rec();
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001574}
1575
Ingo Molnare309b412008-05-12 21:20:51 +02001576static int
Steven Rostedt31e88902008-11-14 16:21:19 -08001577ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001578{
1579 unsigned long ip;
Steven Rostedt593eb8a2008-10-23 09:32:59 -04001580 int ret;
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001581
1582 ip = rec->ip;
1583
Steven Rostedt45a4a232011-04-21 23:16:46 -04001584 if (unlikely(ftrace_disabled))
1585 return 0;
1586
Shaohua Li25aac9d2009-01-09 11:29:40 +08001587 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
Steven Rostedt593eb8a2008-10-23 09:32:59 -04001588 if (ret) {
Steven Rostedt31e88902008-11-14 16:21:19 -08001589 ftrace_bug(ret, ip);
Abhishek Sagar492a7ea52008-05-25 00:10:04 +05301590 return 0;
Steven Rostedt37ad50842008-05-12 21:20:48 +02001591 }
Abhishek Sagar492a7ea52008-05-25 00:10:04 +05301592 return 1;
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001593}
1594
Steven Rostedt000ab692009-02-17 13:35:06 -05001595/*
1596 * archs can override this function if they must do something
1597 * before the modifying code is performed.
1598 */
1599int __weak ftrace_arch_code_modify_prepare(void)
1600{
1601 return 0;
1602}
1603
1604/*
1605 * archs can override this function if they must do something
1606 * after the modifying code is performed.
1607 */
1608int __weak ftrace_arch_code_modify_post_process(void)
1609{
1610 return 0;
1611}
1612
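/*
 * Executed via stop_machine() with the FTRACE_* command bits as the
 * argument. Function tracing is paused (function_trace_stop) while the
 * call sites and the trace callback are being rewritten.
 */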
Ingo Molnare309b412008-05-12 21:20:51 +02001613static int __ftrace_modify_code(void *data)
Steven Rostedt3d083392008-05-12 21:20:42 +02001614{
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001615 int *command = data;
1616
Steven Rostedt6331c282011-07-13 15:11:02 -04001617 /*
 1618 * Do not call the function tracer while we update the code.
 1619 * We are in stop_machine, so there is no need to worry about races.
1620 */
1621 function_trace_stop++;
1622
Steven Rostedta3583242008-11-11 15:01:42 -05001623 if (*command & FTRACE_ENABLE_CALLS)
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001624 ftrace_replace_code(1);
Steven Rostedta3583242008-11-11 15:01:42 -05001625 else if (*command & FTRACE_DISABLE_CALLS)
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001626 ftrace_replace_code(0);
1627
1628 if (*command & FTRACE_UPDATE_TRACE_FUNC)
1629 ftrace_update_ftrace_func(ftrace_trace_function);
1630
Steven Rostedt5a45cfe2008-11-26 00:16:24 -05001631 if (*command & FTRACE_START_FUNC_RET)
1632 ftrace_enable_ftrace_graph_caller();
1633 else if (*command & FTRACE_STOP_FUNC_RET)
1634 ftrace_disable_ftrace_graph_caller();
1635
Steven Rostedt6331c282011-07-13 15:11:02 -04001636#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
1637 /*
1638 * For archs that call ftrace_test_stop_func(), we must
1639 * wait till after we update all the function callers
1640 * before we update the callback. This keeps different
1641 * ops that record different functions from corrupting
1642 * each other.
1643 */
1644 __ftrace_trace_function = __ftrace_trace_function_delay;
1645#endif
1646 function_trace_stop--;
1647
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001648 return 0;
Steven Rostedt3d083392008-05-12 21:20:42 +02001649}
1650
Ingo Molnare309b412008-05-12 21:20:51 +02001651static void ftrace_run_update_code(int command)
Steven Rostedt3d083392008-05-12 21:20:42 +02001652{
Steven Rostedt000ab692009-02-17 13:35:06 -05001653 int ret;
1654
1655 ret = ftrace_arch_code_modify_prepare();
1656 FTRACE_WARN_ON(ret);
1657 if (ret)
1658 return;
1659
Rusty Russell784e2d72008-07-28 12:16:31 -05001660 stop_machine(__ftrace_modify_code, &command, NULL);
Steven Rostedt000ab692009-02-17 13:35:06 -05001661
1662 ret = ftrace_arch_code_modify_post_process();
1663 FTRACE_WARN_ON(ret);
Steven Rostedt3d083392008-05-12 21:20:42 +02001664}
1665
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001666static ftrace_func_t saved_ftrace_func;
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05001667static int ftrace_start_up;
Steven Rostedtb8489142011-05-04 09:27:52 -04001668static int global_start_up;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05001669
1670static void ftrace_startup_enable(int command)
1671{
1672 if (saved_ftrace_func != ftrace_trace_function) {
1673 saved_ftrace_func = ftrace_trace_function;
1674 command |= FTRACE_UPDATE_TRACE_FUNC;
1675 }
1676
1677 if (!command || !ftrace_enabled)
1678 return;
1679
1680 ftrace_run_update_code(command);
1681}
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001682
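/*
 * Start tracing for @ops: bump the ftrace_start_up count, take
 * references on the records selected by the ops' hashes and run the
 * code-modification commands. Ops marked FTRACE_OPS_FL_GLOBAL share
 * the global_ops filter hashes.
 */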
Steven Rostedta1cd6172011-05-23 15:24:25 -04001683static int ftrace_startup(struct ftrace_ops *ops, int command)
Steven Rostedt3d083392008-05-12 21:20:42 +02001684{
Steven Rostedtb8489142011-05-04 09:27:52 -04001685 bool hash_enable = true;
1686
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001687 if (unlikely(ftrace_disabled))
Steven Rostedta1cd6172011-05-23 15:24:25 -04001688 return -ENODEV;
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001689
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05001690 ftrace_start_up++;
Steven Rostedt982c3502008-11-15 16:31:41 -05001691 command |= FTRACE_ENABLE_CALLS;
Steven Rostedt3d083392008-05-12 21:20:42 +02001692
Steven Rostedtb8489142011-05-04 09:27:52 -04001693 /* ops marked global share the filter hashes */
1694 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1695 ops = &global_ops;
1696 /* Don't update hash if global is already set */
1697 if (global_start_up)
1698 hash_enable = false;
1699 global_start_up++;
1700 }
1701
Steven Rostedted926f92011-05-03 13:25:24 -04001702 ops->flags |= FTRACE_OPS_FL_ENABLED;
Steven Rostedtb8489142011-05-04 09:27:52 -04001703 if (hash_enable)
Steven Rostedted926f92011-05-03 13:25:24 -04001704 ftrace_hash_rec_enable(ops, 1);
1705
Steven Rostedtdf4fc312008-11-26 00:16:23 -05001706 ftrace_startup_enable(command);
Steven Rostedta1cd6172011-05-23 15:24:25 -04001707
1708 return 0;
Steven Rostedt3d083392008-05-12 21:20:42 +02001709}
1710
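/*
 * Stop tracing for @ops: the mirror image of ftrace_startup(). Drops
 * the record references and, when the last user goes away, turns the
 * call sites back into NOPs.
 */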
Steven Rostedtbd69c302011-05-03 21:55:54 -04001711static void ftrace_shutdown(struct ftrace_ops *ops, int command)
Steven Rostedt3d083392008-05-12 21:20:42 +02001712{
Steven Rostedtb8489142011-05-04 09:27:52 -04001713 bool hash_disable = true;
1714
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001715 if (unlikely(ftrace_disabled))
1716 return;
1717
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05001718 ftrace_start_up--;
Frederic Weisbecker9ea1a152009-06-20 06:52:21 +02001719 /*
 1720 * Just warn in case of imbalance; no need to kill ftrace. It's not
 1721 * critical, but the ftrace_call callers may never be nopped again
 1722 * after further ftrace uses.
1723 */
1724 WARN_ON_ONCE(ftrace_start_up < 0);
1725
Steven Rostedtb8489142011-05-04 09:27:52 -04001726 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1727 ops = &global_ops;
1728 global_start_up--;
1729 WARN_ON_ONCE(global_start_up < 0);
1730 /* Don't update hash if global still has users */
1731 if (global_start_up) {
1732 WARN_ON_ONCE(!ftrace_start_up);
1733 hash_disable = false;
1734 }
1735 }
1736
1737 if (hash_disable)
Steven Rostedted926f92011-05-03 13:25:24 -04001738 ftrace_hash_rec_disable(ops, 1);
1739
Steven Rostedtb8489142011-05-04 09:27:52 -04001740 if (ops != &global_ops || !global_start_up)
Steven Rostedted926f92011-05-03 13:25:24 -04001741 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
Steven Rostedtb8489142011-05-04 09:27:52 -04001742
1743 if (!ftrace_start_up)
1744 command |= FTRACE_DISABLE_CALLS;
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001745
1746 if (saved_ftrace_func != ftrace_trace_function) {
1747 saved_ftrace_func = ftrace_trace_function;
1748 command |= FTRACE_UPDATE_TRACE_FUNC;
1749 }
1750
1751 if (!command || !ftrace_enabled)
Steven Rostedte6ea44e2009-02-14 01:42:44 -05001752 return;
Steven Rostedt3d083392008-05-12 21:20:42 +02001753
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001754 ftrace_run_update_code(command);
Steven Rostedt3d083392008-05-12 21:20:42 +02001755}
1756
Ingo Molnare309b412008-05-12 21:20:51 +02001757static void ftrace_startup_sysctl(void)
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001758{
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001759 if (unlikely(ftrace_disabled))
1760 return;
1761
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001762 /* Force update next time */
1763 saved_ftrace_func = NULL;
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05001764 /* ftrace_start_up is true if we want ftrace running */
1765 if (ftrace_start_up)
Steven Rostedt79e406d2010-09-14 22:19:46 -04001766 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001767}
1768
Ingo Molnare309b412008-05-12 21:20:51 +02001769static void ftrace_shutdown_sysctl(void)
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001770{
Steven Rostedt4eebcc82008-05-12 21:20:48 +02001771 if (unlikely(ftrace_disabled))
1772 return;
1773
Steven Rostedt60a7ecf2008-11-05 16:05:44 -05001774 /* ftrace_start_up is true if ftrace is running */
1775 if (ftrace_start_up)
Steven Rostedt79e406d2010-09-14 22:19:46 -04001776 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02001777}
1778
Steven Rostedt3d083392008-05-12 21:20:42 +02001779static cycle_t ftrace_update_time;
1780static unsigned long ftrace_update_cnt;
1781unsigned long ftrace_update_tot_cnt;
1782
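/*
 * An ops with an empty (or missing) filter hash traces all functions,
 * so it will also trace the functions of any newly loaded module.
 */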
Steven Rostedtf7bc8b62011-07-14 23:02:27 -04001783static int ops_traces_mod(struct ftrace_ops *ops)
1784{
1785 struct ftrace_hash *hash;
1786
1787 hash = ops->filter_hash;
1788 return !!(!hash || !hash->count);
1789}
1790
Steven Rostedt31e88902008-11-14 16:21:19 -08001791static int ftrace_update_code(struct module *mod)
Steven Rostedt3d083392008-05-12 21:20:42 +02001792{
Lai Jiangshane94142a2009-03-13 17:51:27 +08001793 struct dyn_ftrace *p;
Abhishek Sagarf22f9a82008-06-21 23:50:29 +05301794 cycle_t start, stop;
Steven Rostedtf7bc8b62011-07-14 23:02:27 -04001795 unsigned long ref = 0;
1796
1797 /*
1798 * When adding a module, we need to check if tracers are
1799 * currently enabled and if they are set to trace all functions.
1800 * If they are, we need to enable the module functions as well
1801 * as update the reference counts for those function records.
1802 */
1803 if (mod) {
1804 struct ftrace_ops *ops;
1805
1806 for (ops = ftrace_ops_list;
1807 ops != &ftrace_list_end; ops = ops->next) {
1808 if (ops->flags & FTRACE_OPS_FL_ENABLED &&
1809 ops_traces_mod(ops))
1810 ref++;
1811 }
1812 }
Steven Rostedt3d083392008-05-12 21:20:42 +02001813
Ingo Molnar750ed1a2008-05-12 21:20:46 +02001814 start = ftrace_now(raw_smp_processor_id());
Steven Rostedt3d083392008-05-12 21:20:42 +02001815 ftrace_update_cnt = 0;
1816
Lai Jiangshane94142a2009-03-13 17:51:27 +08001817 while (ftrace_new_addrs) {
Abhishek Sagarf22f9a82008-06-21 23:50:29 +05301818
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001819 /* If something went wrong, bail without enabling anything */
1820 if (unlikely(ftrace_disabled))
1821 return -1;
Steven Rostedt3d083392008-05-12 21:20:42 +02001822
Lai Jiangshane94142a2009-03-13 17:51:27 +08001823 p = ftrace_new_addrs;
Lai Jiangshanee000b72009-03-24 13:38:06 +08001824 ftrace_new_addrs = p->newlist;
Steven Rostedtf7bc8b62011-07-14 23:02:27 -04001825 p->flags = ref;
Abhishek Sagar0eb96702008-06-01 21:47:30 +05301826
Jiri Olsa5cb084b2009-10-13 16:33:53 -04001827 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001828 * Do the initial record conversion from mcount jump
Jiri Olsa5cb084b2009-10-13 16:33:53 -04001829 * to the NOP instructions.
1830 */
1831 if (!ftrace_code_disable(mod, p)) {
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001832 ftrace_free_rec(p);
Steven Rostedtd2c8c3e2011-04-25 14:32:42 -04001833 /* Game over */
1834 break;
Jiri Olsa5cb084b2009-10-13 16:33:53 -04001835 }
1836
Jiri Olsa5cb084b2009-10-13 16:33:53 -04001837 ftrace_update_cnt++;
1838
1839 /*
1840 * If the tracing is enabled, go ahead and enable the record.
1841 *
 1842 * The reason not to enable the record immediately is the
 1843 * inherent check of ftrace_make_nop/ftrace_make_call for
 1844 * correct previous instructions. Doing the NOP conversion
 1845 * first puts the module into the correct state, thus
 1846 * passing the ftrace_make_call check.
1847 */
Steven Rostedtf7bc8b62011-07-14 23:02:27 -04001848 if (ftrace_start_up && ref) {
Jiri Olsa5cb084b2009-10-13 16:33:53 -04001849 int failed = __ftrace_replace_code(p, 1);
1850 if (failed) {
1851 ftrace_bug(failed, p->ip);
1852 ftrace_free_rec(p);
1853 }
1854 }
Steven Rostedt3d083392008-05-12 21:20:42 +02001855 }
1856
Ingo Molnar750ed1a2008-05-12 21:20:46 +02001857 stop = ftrace_now(raw_smp_processor_id());
Steven Rostedt3d083392008-05-12 21:20:42 +02001858 ftrace_update_time = stop - start;
1859 ftrace_update_tot_cnt += ftrace_update_cnt;
1860
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +02001861 return 0;
1862}
1863
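/*
 * Boot-time allocation of the ftrace_pages list that will hold
 * @num_to_init dyn_ftrace records.
 */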
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001864static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001865{
1866 struct ftrace_page *pg;
1867 int cnt;
1868 int i;
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001869
1870 /* allocate a few pages */
1871 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1872 if (!ftrace_pages_start)
1873 return -1;
1874
1875 /*
1876 * Allocate a few more pages.
1877 *
1878 * TODO: have some parser search vmlinux before
1879 * final linking to find all calls to ftrace.
1880 * Then we can:
1881 * a) know how many pages to allocate.
1882 * and/or
1883 * b) set up the table then.
1884 *
1885 * The dynamic code is still necessary for
1886 * modules.
1887 */
1888
1889 pg = ftrace_pages = ftrace_pages_start;
1890
Steven Rostedt68bf21a2008-08-14 15:45:08 -04001891 cnt = num_to_init / ENTRIES_PER_PAGE;
Steven Rostedt08f5ac902008-10-23 09:33:07 -04001892 pr_info("ftrace: allocating %ld entries in %d pages\n",
walimis5821e1b2008-11-15 15:19:06 +08001893 num_to_init, cnt + 1);
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001894
1895 for (i = 0; i < cnt; i++) {
1896 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1897
1898 /* If we fail, we'll try later anyway */
1899 if (!pg->next)
1900 break;
1901
1902 pg = pg->next;
1903 }
1904
1905 return 0;
1906}
1907
Steven Rostedt5072c592008-05-12 21:20:43 +02001908enum {
1909 FTRACE_ITER_FILTER = (1 << 0),
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02001910 FTRACE_ITER_NOTRACE = (1 << 1),
Steven Rostedt3499e462011-04-21 22:59:12 -04001911 FTRACE_ITER_PRINTALL = (1 << 2),
1912 FTRACE_ITER_HASH = (1 << 3),
Steven Rostedt647bcd02011-05-03 14:39:21 -04001913 FTRACE_ITER_ENABLED = (1 << 4),
Steven Rostedt5072c592008-05-12 21:20:43 +02001914};
1915
1916#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1917
1918struct ftrace_iterator {
Steven Rostedt98c4fd02010-09-10 11:47:43 -04001919 loff_t pos;
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001920 loff_t func_pos;
1921 struct ftrace_page *pg;
1922 struct dyn_ftrace *func;
1923 struct ftrace_func_probe *probe;
1924 struct trace_parser parser;
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04001925 struct ftrace_hash *hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001926 struct ftrace_ops *ops;
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001927 int hidx;
1928 int idx;
1929 unsigned flags;
Steven Rostedt5072c592008-05-12 21:20:43 +02001930};
1931
Ingo Molnare309b412008-05-12 21:20:51 +02001932static void *
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001933t_hash_next(struct seq_file *m, loff_t *pos)
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001934{
1935 struct ftrace_iterator *iter = m->private;
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001936 struct hlist_node *hnd = NULL;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001937 struct hlist_head *hhd;
1938
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001939 (*pos)++;
Steven Rostedt98c4fd02010-09-10 11:47:43 -04001940 iter->pos = *pos;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001941
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001942 if (iter->probe)
1943 hnd = &iter->probe->node;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001944 retry:
1945 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1946 return NULL;
1947
1948 hhd = &ftrace_func_hash[iter->hidx];
1949
1950 if (hlist_empty(hhd)) {
1951 iter->hidx++;
1952 hnd = NULL;
1953 goto retry;
1954 }
1955
1956 if (!hnd)
1957 hnd = hhd->first;
1958 else {
1959 hnd = hnd->next;
1960 if (!hnd) {
1961 iter->hidx++;
1962 goto retry;
1963 }
1964 }
1965
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001966 if (WARN_ON_ONCE(!hnd))
1967 return NULL;
1968
1969 iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
1970
1971 return iter;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001972}
1973
1974static void *t_hash_start(struct seq_file *m, loff_t *pos)
1975{
1976 struct ftrace_iterator *iter = m->private;
1977 void *p = NULL;
Li Zefand82d6242009-06-24 09:54:54 +08001978 loff_t l;
1979
Steven Rostedt2bccfff2010-09-09 08:43:22 -04001980 if (iter->func_pos > *pos)
1981 return NULL;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001982
Li Zefand82d6242009-06-24 09:54:54 +08001983 iter->hidx = 0;
Steven Rostedt2bccfff2010-09-09 08:43:22 -04001984 for (l = 0; l <= (*pos - iter->func_pos); ) {
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001985 p = t_hash_next(m, &l);
Li Zefand82d6242009-06-24 09:54:54 +08001986 if (!p)
1987 break;
1988 }
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001989 if (!p)
1990 return NULL;
1991
Steven Rostedt98c4fd02010-09-10 11:47:43 -04001992 /* Only set this if we have an item */
1993 iter->flags |= FTRACE_ITER_HASH;
1994
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001995 return iter;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05001996}
1997
Steven Rostedt4aeb6962010-09-09 10:00:28 -04001998static int
1999t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002000{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002001 struct ftrace_func_probe *rec;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002002
Steven Rostedt4aeb6962010-09-09 10:00:28 -04002003 rec = iter->probe;
2004 if (WARN_ON_ONCE(!rec))
2005 return -EIO;
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002006
Steven Rostedt809dcf22009-02-16 23:06:01 -05002007 if (rec->ops->print)
2008 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2009
Steven Rostedtb375a112009-09-17 00:05:58 -04002010 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002011
2012 if (rec->data)
2013 seq_printf(m, ":%p", rec->data);
2014 seq_putc(m, '\n');
2015
2016 return 0;
2017}
2018
2019static void *
Steven Rostedt5072c592008-05-12 21:20:43 +02002020t_next(struct seq_file *m, void *v, loff_t *pos)
2021{
2022 struct ftrace_iterator *iter = m->private;
Steven Rostedtf45948e2011-05-02 12:29:25 -04002023 struct ftrace_ops *ops = &global_ops;
Steven Rostedt5072c592008-05-12 21:20:43 +02002024 struct dyn_ftrace *rec = NULL;
2025
Steven Rostedt45a4a232011-04-21 23:16:46 -04002026 if (unlikely(ftrace_disabled))
2027 return NULL;
2028
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002029 if (iter->flags & FTRACE_ITER_HASH)
Steven Rostedt4aeb6962010-09-09 10:00:28 -04002030 return t_hash_next(m, pos);
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002031
Steven Rostedt5072c592008-05-12 21:20:43 +02002032 (*pos)++;
Jiri Olsa1106b692011-02-16 17:35:34 +01002033 iter->pos = iter->func_pos = *pos;
Steven Rostedt5072c592008-05-12 21:20:43 +02002034
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05002035 if (iter->flags & FTRACE_ITER_PRINTALL)
Steven Rostedt57c072c2010-09-14 11:21:11 -04002036 return t_hash_start(m, pos);
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05002037
Steven Rostedt5072c592008-05-12 21:20:43 +02002038 retry:
2039 if (iter->idx >= iter->pg->index) {
2040 if (iter->pg->next) {
2041 iter->pg = iter->pg->next;
2042 iter->idx = 0;
2043 goto retry;
2044 }
2045 } else {
2046 rec = &iter->pg->records[iter->idx++];
Steven Rostedta9fdda32008-08-14 22:47:17 -04002047 if ((rec->flags & FTRACE_FL_FREE) ||
2048
Steven Rostedt0183fb1c2008-11-07 22:36:02 -05002049 ((iter->flags & FTRACE_ITER_FILTER) &&
Steven Rostedtf45948e2011-05-02 12:29:25 -04002050 !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
Steven Rostedt0183fb1c2008-11-07 22:36:02 -05002051
Steven Rostedt41c52c02008-05-22 11:46:33 -04002052 ((iter->flags & FTRACE_ITER_NOTRACE) &&
Steven Rostedt647bcd02011-05-03 14:39:21 -04002053 !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2054
2055 ((iter->flags & FTRACE_ITER_ENABLED) &&
2056 !(rec->flags & ~FTRACE_FL_MASK))) {
2057
Steven Rostedt5072c592008-05-12 21:20:43 +02002058 rec = NULL;
2059 goto retry;
2060 }
2061 }
2062
Steven Rostedt4aeb6962010-09-09 10:00:28 -04002063 if (!rec)
Steven Rostedt57c072c2010-09-14 11:21:11 -04002064 return t_hash_start(m, pos);
Steven Rostedt4aeb6962010-09-09 10:00:28 -04002065
2066 iter->func = rec;
2067
2068 return iter;
Steven Rostedt5072c592008-05-12 21:20:43 +02002069}
2070
Steven Rostedt98c4fd02010-09-10 11:47:43 -04002071static void reset_iter_read(struct ftrace_iterator *iter)
2072{
2073 iter->pos = 0;
2074 iter->func_pos = 0;
 2075 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
Steven Rostedt5072c592008-05-12 21:20:43 +02002076}
2077
2078static void *t_start(struct seq_file *m, loff_t *pos)
2079{
2080 struct ftrace_iterator *iter = m->private;
Steven Rostedtf45948e2011-05-02 12:29:25 -04002081 struct ftrace_ops *ops = &global_ops;
Steven Rostedt5072c592008-05-12 21:20:43 +02002082 void *p = NULL;
Li Zefan694ce0a2009-06-24 09:54:19 +08002083 loff_t l;
Steven Rostedt5072c592008-05-12 21:20:43 +02002084
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002085 mutex_lock(&ftrace_lock);
Steven Rostedt45a4a232011-04-21 23:16:46 -04002086
2087 if (unlikely(ftrace_disabled))
2088 return NULL;
2089
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05002090 /*
Steven Rostedt98c4fd02010-09-10 11:47:43 -04002091 * If an lseek was done, then reset and start from the beginning.
2092 */
2093 if (*pos < iter->pos)
2094 reset_iter_read(iter);
2095
2096 /*
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05002097 * For set_ftrace_filter reading, if we have the filter
 2098 * off, we can take a shortcut and just print out that all
2099 * functions are enabled.
2100 */
Steven Rostedtf45948e2011-05-02 12:29:25 -04002101 if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) {
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05002102 if (*pos > 0)
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002103 return t_hash_start(m, pos);
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05002104 iter->flags |= FTRACE_ITER_PRINTALL;
Chris Wrightdf091622010-09-09 16:34:59 -07002105 /* reset in case of seek/pread */
2106 iter->flags &= ~FTRACE_ITER_HASH;
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05002107 return iter;
2108 }
2109
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002110 if (iter->flags & FTRACE_ITER_HASH)
2111 return t_hash_start(m, pos);
2112
Steven Rostedt98c4fd02010-09-10 11:47:43 -04002113 /*
2114 * Unfortunately, we need to restart at ftrace_pages_start
 2115 * every time we let go of the ftrace_lock mutex. This is because
2116 * those pointers can change without the lock.
2117 */
Li Zefan694ce0a2009-06-24 09:54:19 +08002118 iter->pg = ftrace_pages_start;
2119 iter->idx = 0;
2120 for (l = 0; l <= *pos; ) {
2121 p = t_next(m, p, &l);
2122 if (!p)
2123 break;
Liming Wang50cdaf02008-11-28 12:13:21 +08002124 }
walimis5821e1b2008-11-15 15:19:06 +08002125
Steven Rostedt4aeb6962010-09-09 10:00:28 -04002126 if (!p) {
2127 if (iter->flags & FTRACE_ITER_FILTER)
2128 return t_hash_start(m, pos);
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002129
Steven Rostedt4aeb6962010-09-09 10:00:28 -04002130 return NULL;
2131 }
2132
2133 return iter;
Steven Rostedt5072c592008-05-12 21:20:43 +02002134}
2135
2136static void t_stop(struct seq_file *m, void *p)
2137{
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002138 mutex_unlock(&ftrace_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02002139}
2140
2141static int t_show(struct seq_file *m, void *v)
2142{
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05002143 struct ftrace_iterator *iter = m->private;
Steven Rostedt4aeb6962010-09-09 10:00:28 -04002144 struct dyn_ftrace *rec;
Steven Rostedt5072c592008-05-12 21:20:43 +02002145
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002146 if (iter->flags & FTRACE_ITER_HASH)
Steven Rostedt4aeb6962010-09-09 10:00:28 -04002147 return t_hash_show(m, iter);
Steven Rostedt8fc0c702009-02-16 15:28:00 -05002148
Steven Rostedt0c75a3e2009-02-16 11:21:52 -05002149 if (iter->flags & FTRACE_ITER_PRINTALL) {
2150 seq_printf(m, "#### all functions enabled ####\n");
2151 return 0;
2152 }
2153
Steven Rostedt4aeb6962010-09-09 10:00:28 -04002154 rec = iter->func;
2155
Steven Rostedt5072c592008-05-12 21:20:43 +02002156 if (!rec)
2157 return 0;
2158
Steven Rostedt647bcd02011-05-03 14:39:21 -04002159 seq_printf(m, "%ps", (void *)rec->ip);
2160 if (iter->flags & FTRACE_ITER_ENABLED)
2161 seq_printf(m, " (%ld)",
2162 rec->flags & ~FTRACE_FL_MASK);
2163 seq_printf(m, "\n");
Steven Rostedt5072c592008-05-12 21:20:43 +02002164
2165 return 0;
2166}
2167
James Morris88e9d342009-09-22 16:43:43 -07002168static const struct seq_operations show_ftrace_seq_ops = {
Steven Rostedt5072c592008-05-12 21:20:43 +02002169 .start = t_start,
2170 .next = t_next,
2171 .stop = t_stop,
2172 .show = t_show,
2173};
2174
Ingo Molnare309b412008-05-12 21:20:51 +02002175static int
Steven Rostedt5072c592008-05-12 21:20:43 +02002176ftrace_avail_open(struct inode *inode, struct file *file)
2177{
2178 struct ftrace_iterator *iter;
2179 int ret;
2180
Steven Rostedt4eebcc82008-05-12 21:20:48 +02002181 if (unlikely(ftrace_disabled))
2182 return -ENODEV;
2183
Steven Rostedt5072c592008-05-12 21:20:43 +02002184 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2185 if (!iter)
2186 return -ENOMEM;
2187
2188 iter->pg = ftrace_pages_start;
Steven Rostedt5072c592008-05-12 21:20:43 +02002189
2190 ret = seq_open(file, &show_ftrace_seq_ops);
2191 if (!ret) {
2192 struct seq_file *m = file->private_data;
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002193
Steven Rostedt5072c592008-05-12 21:20:43 +02002194 m->private = iter;
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002195 } else {
Steven Rostedt5072c592008-05-12 21:20:43 +02002196 kfree(iter);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002197 }
Steven Rostedt5072c592008-05-12 21:20:43 +02002198
2199 return ret;
2200}
2201
Steven Rostedt647bcd02011-05-03 14:39:21 -04002202static int
2203ftrace_enabled_open(struct inode *inode, struct file *file)
2204{
2205 struct ftrace_iterator *iter;
2206 int ret;
2207
2208 if (unlikely(ftrace_disabled))
2209 return -ENODEV;
2210
2211 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2212 if (!iter)
2213 return -ENOMEM;
2214
2215 iter->pg = ftrace_pages_start;
2216 iter->flags = FTRACE_ITER_ENABLED;
2217
2218 ret = seq_open(file, &show_ftrace_seq_ops);
2219 if (!ret) {
2220 struct seq_file *m = file->private_data;
2221
2222 m->private = iter;
2223 } else {
2224 kfree(iter);
2225 }
2226
2227 return ret;
2228}
2229
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04002230static void ftrace_filter_reset(struct ftrace_hash *hash)
Steven Rostedt5072c592008-05-12 21:20:43 +02002231{
Steven Rostedt52baf112009-02-14 01:15:39 -05002232 mutex_lock(&ftrace_lock);
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04002233 ftrace_hash_clear(hash);
Steven Rostedt52baf112009-02-14 01:15:39 -05002234 mutex_unlock(&ftrace_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02002235}
2236
Ingo Molnare309b412008-05-12 21:20:51 +02002237static int
Steven Rostedtf45948e2011-05-02 12:29:25 -04002238ftrace_regex_open(struct ftrace_ops *ops, int flag,
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04002239 struct inode *inode, struct file *file)
Steven Rostedt5072c592008-05-12 21:20:43 +02002240{
2241 struct ftrace_iterator *iter;
Steven Rostedtf45948e2011-05-02 12:29:25 -04002242 struct ftrace_hash *hash;
Steven Rostedt5072c592008-05-12 21:20:43 +02002243 int ret = 0;
2244
Steven Rostedt4eebcc82008-05-12 21:20:48 +02002245 if (unlikely(ftrace_disabled))
2246 return -ENODEV;
2247
Steven Rostedt5072c592008-05-12 21:20:43 +02002248 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2249 if (!iter)
2250 return -ENOMEM;
2251
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02002252 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2253 kfree(iter);
2254 return -ENOMEM;
2255 }
2256
Steven Rostedtf45948e2011-05-02 12:29:25 -04002257 if (flag & FTRACE_ITER_NOTRACE)
2258 hash = ops->notrace_hash;
2259 else
2260 hash = ops->filter_hash;
2261
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002262 iter->ops = ops;
2263 iter->flags = flag;
2264
2265 if (file->f_mode & FMODE_WRITE) {
2266 mutex_lock(&ftrace_lock);
2267 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2268 mutex_unlock(&ftrace_lock);
2269
2270 if (!iter->hash) {
2271 trace_parser_put(&iter->parser);
2272 kfree(iter);
2273 return -ENOMEM;
2274 }
2275 }
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04002276
Steven Rostedt41c52c02008-05-22 11:46:33 -04002277 mutex_lock(&ftrace_regex_lock);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002278
Steven Rostedt5072c592008-05-12 21:20:43 +02002279 if ((file->f_mode & FMODE_WRITE) &&
Steven Rostedt8650ae32009-07-22 23:29:30 -04002280 (file->f_flags & O_TRUNC))
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002281 ftrace_filter_reset(iter->hash);
Steven Rostedt5072c592008-05-12 21:20:43 +02002282
2283 if (file->f_mode & FMODE_READ) {
2284 iter->pg = ftrace_pages_start;
Steven Rostedt5072c592008-05-12 21:20:43 +02002285
2286 ret = seq_open(file, &show_ftrace_seq_ops);
2287 if (!ret) {
2288 struct seq_file *m = file->private_data;
2289 m->private = iter;
Li Zefan79fe2492009-09-22 13:54:28 +08002290 } else {
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002291 /* Failed */
2292 free_ftrace_hash(iter->hash);
Li Zefan79fe2492009-09-22 13:54:28 +08002293 trace_parser_put(&iter->parser);
Steven Rostedt5072c592008-05-12 21:20:43 +02002294 kfree(iter);
Li Zefan79fe2492009-09-22 13:54:28 +08002295 }
Steven Rostedt5072c592008-05-12 21:20:43 +02002296 } else
2297 file->private_data = iter;
Steven Rostedt41c52c02008-05-22 11:46:33 -04002298 mutex_unlock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02002299
2300 return ret;
2301}
2302
Steven Rostedt41c52c02008-05-22 11:46:33 -04002303static int
2304ftrace_filter_open(struct inode *inode, struct file *file)
2305{
Steven Rostedtf45948e2011-05-02 12:29:25 -04002306 return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER,
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04002307 inode, file);
Steven Rostedt41c52c02008-05-22 11:46:33 -04002308}
2309
2310static int
2311ftrace_notrace_open(struct inode *inode, struct file *file)
2312{
Steven Rostedtf45948e2011-05-02 12:29:25 -04002313 return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04002314 inode, file);
Steven Rostedt41c52c02008-05-22 11:46:33 -04002315}
2316
Ingo Molnare309b412008-05-12 21:20:51 +02002317static loff_t
Steven Rostedt41c52c02008-05-22 11:46:33 -04002318ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
Steven Rostedt5072c592008-05-12 21:20:43 +02002319{
2320 loff_t ret;
2321
2322 if (file->f_mode & FMODE_READ)
2323 ret = seq_lseek(file, offset, origin);
2324 else
2325 file->f_pos = ret = 1;
2326
2327 return ret;
2328}
2329
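/*
 * Test @str against @regex using the match @type produced by
 * filter_parse_regex(): an exact match, a prefix match ("foo*"),
 * a substring match ("*foo*") or a suffix match ("*foo").
 */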
Steven Rostedt64e7c442009-02-13 17:08:48 -05002330static int ftrace_match(char *str, char *regex, int len, int type)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002331{
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002332 int matched = 0;
Li Zefan751e9982010-01-14 10:53:02 +08002333 int slen;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002334
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002335 switch (type) {
2336 case MATCH_FULL:
2337 if (strcmp(str, regex) == 0)
2338 matched = 1;
2339 break;
2340 case MATCH_FRONT_ONLY:
2341 if (strncmp(str, regex, len) == 0)
2342 matched = 1;
2343 break;
2344 case MATCH_MIDDLE_ONLY:
2345 if (strstr(str, regex))
2346 matched = 1;
2347 break;
2348 case MATCH_END_ONLY:
Li Zefan751e9982010-01-14 10:53:02 +08002349 slen = strlen(str);
2350 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002351 matched = 1;
2352 break;
2353 }
2354
2355 return matched;
2356}
2357
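/*
 * Add the record's ip to @hash, or remove it when @not is set (the
 * filter string began with '!'). Returns 0 on success or a negative
 * error from add_hash_entry().
 */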
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002358static int
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04002359enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
Steven Rostedt996e87b2011-04-26 16:11:03 -04002360{
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002361 struct ftrace_func_entry *entry;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002362 int ret = 0;
2363
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04002364 entry = ftrace_lookup_ip(hash, rec->ip);
2365 if (not) {
2366 /* Do nothing if it doesn't exist */
2367 if (!entry)
2368 return 0;
2369
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002370 free_hash_entry(hash, entry);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002371 } else {
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04002372 /* Do nothing if it exists */
2373 if (entry)
2374 return 0;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002375
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04002376 ret = add_hash_entry(hash, rec->ip);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002377 }
2378 return ret;
Steven Rostedt996e87b2011-04-26 16:11:03 -04002379}
2380
Steven Rostedt64e7c442009-02-13 17:08:48 -05002381static int
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002382ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2383 char *regex, int len, int type)
Steven Rostedt64e7c442009-02-13 17:08:48 -05002384{
2385 char str[KSYM_SYMBOL_LEN];
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002386 char *modname;
Steven Rostedt64e7c442009-02-13 17:08:48 -05002387
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002388 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2389
2390 if (mod) {
2391 /* module lookup requires matching the module */
2392 if (!modname || strcmp(modname, mod))
2393 return 0;
2394
2395 /* blank search means to match all funcs in the mod */
2396 if (!len)
2397 return 1;
2398 }
2399
Steven Rostedt64e7c442009-02-13 17:08:48 -05002400 return ftrace_match(str, regex, len, type);
2401}
2402
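/*
 * Walk all dyn_ftrace records and enter every function matching @buff
 * (and @mod, when a module name is given) into @hash; with @not set
 * the matching entries are removed instead. Returns whether anything
 * matched, or a negative error.
 */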
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04002403static int
2404match_records(struct ftrace_hash *hash, char *buff,
2405 int len, char *mod, int not)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002406{
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002407 unsigned search_len = 0;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002408 struct ftrace_page *pg;
2409 struct dyn_ftrace *rec;
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002410 int type = MATCH_FULL;
2411 char *search = buff;
Li Zefan311d16d2009-12-08 11:15:11 +08002412 int found = 0;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002413 int ret;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002414
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002415 if (len) {
2416 type = filter_parse_regex(buff, len, &search, &not);
2417 search_len = strlen(search);
2418 }
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002419
Steven Rostedt52baf112009-02-14 01:15:39 -05002420 mutex_lock(&ftrace_lock);
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002421
2422 if (unlikely(ftrace_disabled))
2423 goto out_unlock;
2424
Steven Rostedt265c8312009-02-13 12:43:56 -05002425 do_for_each_ftrace_rec(pg, rec) {
Steven Rostedt5072c592008-05-12 21:20:43 +02002426
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002427 if (ftrace_match_record(rec, mod, search, search_len, type)) {
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04002428 ret = enter_record(hash, rec, not);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002429 if (ret < 0) {
2430 found = ret;
2431 goto out_unlock;
2432 }
Li Zefan311d16d2009-12-08 11:15:11 +08002433 found = 1;
Steven Rostedt265c8312009-02-13 12:43:56 -05002434 }
2435 } while_for_each_ftrace_rec();
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002436 out_unlock:
Steven Rostedt52baf112009-02-14 01:15:39 -05002437 mutex_unlock(&ftrace_lock);
Li Zefan311d16d2009-12-08 11:15:11 +08002438
2439 return found;
Steven Rostedt5072c592008-05-12 21:20:43 +02002440}
2441
Steven Rostedt64e7c442009-02-13 17:08:48 -05002442static int
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04002443ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
Steven Rostedt64e7c442009-02-13 17:08:48 -05002444{
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04002445 return match_records(hash, buff, len, NULL, 0);
Steven Rostedt64e7c442009-02-13 17:08:48 -05002446}
2447
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04002448static int
2449ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
Steven Rostedt64e7c442009-02-13 17:08:48 -05002450{
Steven Rostedt64e7c442009-02-13 17:08:48 -05002451 int not = 0;
Steven Rostedt6a24a242009-02-17 11:20:26 -05002452
Steven Rostedt64e7c442009-02-13 17:08:48 -05002453 /* blank or '*' mean the same */
2454 if (strcmp(buff, "*") == 0)
2455 buff[0] = 0;
2456
2457 /* handle the case of 'dont filter this module' */
2458 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2459 buff[0] = 0;
2460 not = 1;
2461 }
2462
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04002463 return match_records(hash, buff, strlen(buff), mod, not);
Steven Rostedt64e7c442009-02-13 17:08:48 -05002464}
2465
Steven Rostedtf6180772009-02-14 00:40:25 -05002466/*
2467 * We register the module command as a template to show others how
 2468 * to register a command as well.
2469 */
2470
2471static int
Steven Rostedt43dd61c2011-07-07 11:09:22 -04002472ftrace_mod_callback(struct ftrace_hash *hash,
2473 char *func, char *cmd, char *param, int enable)
Steven Rostedtf6180772009-02-14 00:40:25 -05002474{
2475 char *mod;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002476 int ret = -EINVAL;
Steven Rostedtf6180772009-02-14 00:40:25 -05002477
2478 /*
2479 * cmd == 'mod' because we only registered this func
2480 * for the 'mod' ftrace_func_command.
2481 * But if you register one func with multiple commands,
2482 * you can tell which command was used by the cmd
2483 * parameter.
2484 */
2485
2486 /* we must have a module name */
2487 if (!param)
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002488 return ret;
Steven Rostedtf6180772009-02-14 00:40:25 -05002489
2490 mod = strsep(&param, ":");
2491 if (!strlen(mod))
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002492 return ret;
Steven Rostedtf6180772009-02-14 00:40:25 -05002493
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04002494 ret = ftrace_match_module_records(hash, func, mod);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002495 if (!ret)
2496 ret = -EINVAL;
2497 if (ret < 0)
2498 return ret;
2499
2500 return 0;
Steven Rostedtf6180772009-02-14 00:40:25 -05002501}
2502
2503static struct ftrace_func_command ftrace_mod_cmd = {
2504 .name = "mod",
2505 .func = ftrace_mod_callback,
2506};
2507
2508static int __init ftrace_mod_cmd_init(void)
2509{
2510 return register_ftrace_command(&ftrace_mod_cmd);
2511}
2512device_initcall(ftrace_mod_cmd_init);
2513
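/*
 * The ftrace_ops callback used for function probes: every traced
 * function lands here, the ip is looked up in ftrace_func_hash and
 * each registered probe for that ip has its handler invoked.
 */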
Steven Rostedt59df055f2009-02-14 15:29:06 -05002514static void
Steven Rostedtb6887d72009-02-17 12:32:04 -05002515function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002516{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002517 struct ftrace_func_probe *entry;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002518 struct hlist_head *hhd;
2519 struct hlist_node *n;
2520 unsigned long key;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002521
2522 key = hash_long(ip, FTRACE_HASH_BITS);
2523
2524 hhd = &ftrace_func_hash[key];
2525
2526 if (hlist_empty(hhd))
2527 return;
2528
2529 /*
 2530 * Disable preemption for these calls to prevent an RCU grace
2531 * period. This syncs the hash iteration and freeing of items
2532 * on the hash. rcu_read_lock is too dangerous here.
2533 */
Steven Rostedt5168ae52010-06-03 09:36:50 -04002534 preempt_disable_notrace();
Steven Rostedt59df055f2009-02-14 15:29:06 -05002535 hlist_for_each_entry_rcu(entry, n, hhd, node) {
2536 if (entry->ip == ip)
2537 entry->ops->func(ip, parent_ip, &entry->data);
2538 }
Steven Rostedt5168ae52010-06-03 09:36:50 -04002539 preempt_enable_notrace();
Steven Rostedt59df055f2009-02-14 15:29:06 -05002540}
2541
Steven Rostedtb6887d72009-02-17 12:32:04 -05002542static struct ftrace_ops trace_probe_ops __read_mostly =
Steven Rostedt59df055f2009-02-14 15:29:06 -05002543{
Steven Rostedtfb9fb012009-03-25 13:26:41 -04002544 .func = function_trace_probe_call,
Steven Rostedt59df055f2009-02-14 15:29:06 -05002545};
2546
Steven Rostedtb6887d72009-02-17 12:32:04 -05002547static int ftrace_probe_registered;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002548
Steven Rostedtb6887d72009-02-17 12:32:04 -05002549static void __enable_ftrace_function_probe(void)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002550{
Steven Rostedtb8489142011-05-04 09:27:52 -04002551 int ret;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002552 int i;
2553
Steven Rostedtb6887d72009-02-17 12:32:04 -05002554 if (ftrace_probe_registered)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002555 return;
2556
2557 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2558 struct hlist_head *hhd = &ftrace_func_hash[i];
2559 if (hhd->first)
2560 break;
2561 }
2562 /* Nothing registered? */
2563 if (i == FTRACE_FUNC_HASHSIZE)
2564 return;
2565
Steven Rostedtb8489142011-05-04 09:27:52 -04002566 ret = __register_ftrace_function(&trace_probe_ops);
2567 if (!ret)
Steven Rostedta1cd6172011-05-23 15:24:25 -04002568 ret = ftrace_startup(&trace_probe_ops, 0);
Steven Rostedtb8489142011-05-04 09:27:52 -04002569
Steven Rostedtb6887d72009-02-17 12:32:04 -05002570 ftrace_probe_registered = 1;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002571}
2572
Steven Rostedtb6887d72009-02-17 12:32:04 -05002573static void __disable_ftrace_function_probe(void)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002574{
Steven Rostedtb8489142011-05-04 09:27:52 -04002575 int ret;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002576 int i;
2577
Steven Rostedtb6887d72009-02-17 12:32:04 -05002578 if (!ftrace_probe_registered)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002579 return;
2580
2581 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2582 struct hlist_head *hhd = &ftrace_func_hash[i];
2583 if (hhd->first)
2584 return;
2585 }
2586
2587 /* no more funcs left */
Steven Rostedtb8489142011-05-04 09:27:52 -04002588 ret = __unregister_ftrace_function(&trace_probe_ops);
2589 if (!ret)
2590 ftrace_shutdown(&trace_probe_ops, 0);
2591
Steven Rostedtb6887d72009-02-17 12:32:04 -05002592 ftrace_probe_registered = 0;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002593}
2594
2595
2596static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2597{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002598 struct ftrace_func_probe *entry =
2599 container_of(rhp, struct ftrace_func_probe, rcu);
Steven Rostedt59df055f2009-02-14 15:29:06 -05002600
2601 if (entry->ops->free)
2602 entry->ops->free(&entry->data);
2603 kfree(entry);
2604}
2605
2606
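/*
 * Register @ops as a probe on every function matching @glob, passing
 * @data to its handler on each hit. Returns the number of functions
 * hooked or a negative error. A minimal caller (a sketch; the names
 * here are hypothetical) could look like:
 *
 *	static void my_handler(unsigned long ip, unsigned long parent_ip,
 *			       void **data)
 *	{
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func		= my_handler,
 *	};
 *
 *	register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 */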
2607int
Steven Rostedtb6887d72009-02-17 12:32:04 -05002608register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
Steven Rostedt59df055f2009-02-14 15:29:06 -05002609 void *data)
2610{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002611 struct ftrace_func_probe *entry;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002612 struct ftrace_page *pg;
2613 struct dyn_ftrace *rec;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002614 int type, len, not;
Steven Rostedt6a24a242009-02-17 11:20:26 -05002615 unsigned long key;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002616 int count = 0;
2617 char *search;
2618
Frederic Weisbecker3f6fe062009-09-24 21:31:51 +02002619 type = filter_parse_regex(glob, strlen(glob), &search, &not);
Steven Rostedt59df055f2009-02-14 15:29:06 -05002620 len = strlen(search);
2621
Steven Rostedtb6887d72009-02-17 12:32:04 -05002622 /* we do not support '!' for function probes */
Steven Rostedt59df055f2009-02-14 15:29:06 -05002623 if (WARN_ON(not))
2624 return -EINVAL;
2625
2626 mutex_lock(&ftrace_lock);
Steven Rostedt59df055f2009-02-14 15:29:06 -05002627
Steven Rostedt45a4a232011-04-21 23:16:46 -04002628 if (unlikely(ftrace_disabled))
2629 goto out_unlock;
2630
Steven Rostedt59df055f2009-02-14 15:29:06 -05002631 do_for_each_ftrace_rec(pg, rec) {
2632
Steven Rostedtb9df92d2011-04-28 20:32:08 -04002633 if (!ftrace_match_record(rec, NULL, search, len, type))
Steven Rostedt59df055f2009-02-14 15:29:06 -05002634 continue;
2635
2636 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2637 if (!entry) {
Steven Rostedtb6887d72009-02-17 12:32:04 -05002638 /* If we did not process any, then return error */
Steven Rostedt59df055f2009-02-14 15:29:06 -05002639 if (!count)
2640 count = -ENOMEM;
2641 goto out_unlock;
2642 }
2643
2644 count++;
2645
2646 entry->data = data;
2647
2648 /*
2649 * The caller might want to do something special
2650 * for each function we find. We call the callback
2651 * to give the caller an opportunity to do so.
2652 */
2653 if (ops->callback) {
2654 if (ops->callback(rec->ip, &entry->data) < 0) {
2655 /* caller does not like this func */
2656 kfree(entry);
2657 continue;
2658 }
2659 }
2660
2661 entry->ops = ops;
2662 entry->ip = rec->ip;
2663
2664 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2665 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2666
2667 } while_for_each_ftrace_rec();
Steven Rostedtb6887d72009-02-17 12:32:04 -05002668 __enable_ftrace_function_probe();
Steven Rostedt59df055f2009-02-14 15:29:06 -05002669
2670 out_unlock:
2671 mutex_unlock(&ftrace_lock);
2672
2673 return count;
2674}
2675
2676enum {
Steven Rostedtb6887d72009-02-17 12:32:04 -05002677 PROBE_TEST_FUNC = 1,
2678 PROBE_TEST_DATA = 2
Steven Rostedt59df055f2009-02-14 15:29:06 -05002679};
2680
2681static void
Steven Rostedtb6887d72009-02-17 12:32:04 -05002682__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
Steven Rostedt59df055f2009-02-14 15:29:06 -05002683 void *data, int flags)
2684{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002685 struct ftrace_func_probe *entry;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002686 struct hlist_node *n, *tmp;
2687 char str[KSYM_SYMBOL_LEN];
2688 int type = MATCH_FULL;
2689 int i, len = 0;
2690 char *search;
2691
Atsushi Tsujib36461d2009-09-15 19:06:30 +09002692 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
Steven Rostedt59df055f2009-02-14 15:29:06 -05002693 glob = NULL;
Atsushi Tsujib36461d2009-09-15 19:06:30 +09002694 else if (glob) {
Steven Rostedt59df055f2009-02-14 15:29:06 -05002695 int not;
2696
Frederic Weisbecker3f6fe062009-09-24 21:31:51 +02002697 type = filter_parse_regex(glob, strlen(glob), &search, &not);
Steven Rostedt59df055f2009-02-14 15:29:06 -05002698 len = strlen(search);
2699
Steven Rostedtb6887d72009-02-17 12:32:04 -05002700 /* we do not support '!' for function probes */
Steven Rostedt59df055f2009-02-14 15:29:06 -05002701 if (WARN_ON(not))
2702 return;
2703 }
2704
2705 mutex_lock(&ftrace_lock);
2706 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2707 struct hlist_head *hhd = &ftrace_func_hash[i];
2708
2709 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2710
2711 /* break up if statements for readability */
Steven Rostedtb6887d72009-02-17 12:32:04 -05002712 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002713 continue;
2714
Steven Rostedtb6887d72009-02-17 12:32:04 -05002715 if ((flags & PROBE_TEST_DATA) && entry->data != data)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002716 continue;
2717
2718 /* do this last, since it is the most expensive */
2719 if (glob) {
2720 kallsyms_lookup(entry->ip, NULL, NULL,
2721 NULL, str);
2722 if (!ftrace_match(str, glob, len, type))
2723 continue;
2724 }
2725
2726 hlist_del(&entry->node);
2727 call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2728 }
2729 }
Steven Rostedtb6887d72009-02-17 12:32:04 -05002730 __disable_ftrace_function_probe();
Steven Rostedt59df055f2009-02-14 15:29:06 -05002731 mutex_unlock(&ftrace_lock);
2732}
2733
2734void
Steven Rostedtb6887d72009-02-17 12:32:04 -05002735unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
Steven Rostedt59df055f2009-02-14 15:29:06 -05002736 void *data)
2737{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002738 __unregister_ftrace_function_probe(glob, ops, data,
2739 PROBE_TEST_FUNC | PROBE_TEST_DATA);
Steven Rostedt59df055f2009-02-14 15:29:06 -05002740}
2741
2742void
Steven Rostedtb6887d72009-02-17 12:32:04 -05002743unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002744{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002745 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
Steven Rostedt59df055f2009-02-14 15:29:06 -05002746}
2747
Steven Rostedtb6887d72009-02-17 12:32:04 -05002748void unregister_ftrace_function_probe_all(char *glob)
Steven Rostedt59df055f2009-02-14 15:29:06 -05002749{
Steven Rostedtb6887d72009-02-17 12:32:04 -05002750 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
Steven Rostedt59df055f2009-02-14 15:29:06 -05002751}
2752
Steven Rostedtf6180772009-02-14 00:40:25 -05002753static LIST_HEAD(ftrace_commands);
2754static DEFINE_MUTEX(ftrace_cmd_mutex);
2755
2756int register_ftrace_command(struct ftrace_func_command *cmd)
2757{
2758 struct ftrace_func_command *p;
2759 int ret = 0;
2760
2761 mutex_lock(&ftrace_cmd_mutex);
2762 list_for_each_entry(p, &ftrace_commands, list) {
2763 if (strcmp(cmd->name, p->name) == 0) {
2764 ret = -EBUSY;
2765 goto out_unlock;
2766 }
2767 }
2768 list_add(&cmd->list, &ftrace_commands);
2769 out_unlock:
2770 mutex_unlock(&ftrace_cmd_mutex);
2771
2772 return ret;
2773}
2774
2775int unregister_ftrace_command(struct ftrace_func_command *cmd)
2776{
2777 struct ftrace_func_command *p, *n;
2778 int ret = -ENODEV;
2779
2780 mutex_lock(&ftrace_cmd_mutex);
2781 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2782 if (strcmp(cmd->name, p->name) == 0) {
2783 ret = 0;
2784 list_del_init(&p->list);
2785 goto out_unlock;
2786 }
2787 }
2788 out_unlock:
2789 mutex_unlock(&ftrace_cmd_mutex);
2790
2791 return ret;
2792}
2793
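/*
 * Handle one entry written to set_ftrace_filter / set_ftrace_notrace.
 * A plain glob selects functions directly; "func:command[:param]" is
 * dispatched to the matching registered ftrace_func_command, e.g.
 * "*:mod:mymod" (hypothetical module name) runs the "mod" command for
 * every function of mymod.
 */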
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002794static int ftrace_process_regex(struct ftrace_hash *hash,
2795 char *buff, int len, int enable)
Steven Rostedt64e7c442009-02-13 17:08:48 -05002796{
Steven Rostedtf6180772009-02-14 00:40:25 -05002797 char *func, *command, *next = buff;
Steven Rostedt6a24a242009-02-17 11:20:26 -05002798 struct ftrace_func_command *p;
GuoWen Li0aff1c02011-06-01 19:18:47 +08002799 int ret = -EINVAL;
Steven Rostedt64e7c442009-02-13 17:08:48 -05002800
2801 func = strsep(&next, ":");
2802
2803 if (!next) {
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04002804 ret = ftrace_match_records(hash, func, len);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002805 if (!ret)
2806 ret = -EINVAL;
2807 if (ret < 0)
2808 return ret;
2809 return 0;
Steven Rostedt64e7c442009-02-13 17:08:48 -05002810 }
2811
Steven Rostedtf6180772009-02-14 00:40:25 -05002812 /* command found */
Steven Rostedt64e7c442009-02-13 17:08:48 -05002813
2814 command = strsep(&next, ":");
2815
Steven Rostedtf6180772009-02-14 00:40:25 -05002816 mutex_lock(&ftrace_cmd_mutex);
2817 list_for_each_entry(p, &ftrace_commands, list) {
2818 if (strcmp(p->name, command) == 0) {
Steven Rostedt43dd61c2011-07-07 11:09:22 -04002819 ret = p->func(hash, func, command, next, enable);
Steven Rostedtf6180772009-02-14 00:40:25 -05002820 goto out_unlock;
2821 }
Steven Rostedt64e7c442009-02-13 17:08:48 -05002822 }
Steven Rostedtf6180772009-02-14 00:40:25 -05002823 out_unlock:
2824 mutex_unlock(&ftrace_cmd_mutex);
Steven Rostedt64e7c442009-02-13 17:08:48 -05002825
Steven Rostedtf6180772009-02-14 00:40:25 -05002826 return ret;
Steven Rostedt64e7c442009-02-13 17:08:48 -05002827}
2828
Ingo Molnare309b412008-05-12 21:20:51 +02002829static ssize_t
Steven Rostedt41c52c02008-05-22 11:46:33 -04002830ftrace_regex_write(struct file *file, const char __user *ubuf,
2831 size_t cnt, loff_t *ppos, int enable)
Steven Rostedt5072c592008-05-12 21:20:43 +02002832{
2833 struct ftrace_iterator *iter;
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02002834 struct trace_parser *parser;
2835 ssize_t ret, read;
Steven Rostedt5072c592008-05-12 21:20:43 +02002836
Li Zefan4ba79782009-09-22 13:52:20 +08002837 if (!cnt)
Steven Rostedt5072c592008-05-12 21:20:43 +02002838 return 0;
2839
Steven Rostedt41c52c02008-05-22 11:46:33 -04002840 mutex_lock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02002841
Steven Rostedt45a4a232011-04-21 23:16:46 -04002842 ret = -ENODEV;
2843 if (unlikely(ftrace_disabled))
2844 goto out_unlock;
2845
Steven Rostedt5072c592008-05-12 21:20:43 +02002846 if (file->f_mode & FMODE_READ) {
2847 struct seq_file *m = file->private_data;
2848 iter = m->private;
2849 } else
2850 iter = file->private_data;
2851
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02002852 parser = &iter->parser;
2853 read = trace_get_user(parser, ubuf, cnt, ppos);
Steven Rostedt5072c592008-05-12 21:20:43 +02002854
Li Zefan4ba79782009-09-22 13:52:20 +08002855 if (read >= 0 && trace_parser_loaded(parser) &&
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02002856 !trace_parser_cont(parser)) {
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002857 ret = ftrace_process_regex(iter->hash, parser->buffer,
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02002858 parser->idx, enable);
Li Zefan313254a2009-12-08 11:15:30 +08002859 trace_parser_clear(parser);
Steven Rostedt5072c592008-05-12 21:20:43 +02002860 if (ret)
Li Zefaned146b252009-11-03 08:55:38 +08002861 goto out_unlock;
Steven Rostedt5072c592008-05-12 21:20:43 +02002862 }
2863
Steven Rostedt5072c592008-05-12 21:20:43 +02002864 ret = read;
Li Zefaned146b252009-11-03 08:55:38 +08002865out_unlock:
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02002866 mutex_unlock(&ftrace_regex_lock);
Li Zefaned146b252009-11-03 08:55:38 +08002867
Steven Rostedt5072c592008-05-12 21:20:43 +02002868 return ret;
2869}
2870
Steven Rostedt41c52c02008-05-22 11:46:33 -04002871static ssize_t
2872ftrace_filter_write(struct file *file, const char __user *ubuf,
2873 size_t cnt, loff_t *ppos)
2874{
2875 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2876}
2877
2878static ssize_t
2879ftrace_notrace_write(struct file *file, const char __user *ubuf,
2880 size_t cnt, loff_t *ppos)
2881{
2882 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2883}
2884
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002885static int
Steven Rostedtf45948e2011-05-02 12:29:25 -04002886ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
2887 int reset, int enable)
Steven Rostedt41c52c02008-05-22 11:46:33 -04002888{
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002889 struct ftrace_hash **orig_hash;
Steven Rostedtf45948e2011-05-02 12:29:25 -04002890 struct ftrace_hash *hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002891 int ret;
Steven Rostedtf45948e2011-05-02 12:29:25 -04002892
Steven Rostedt936e0742011-05-05 22:54:01 -04002893	/* All global ops use the global ops filters */
2894 if (ops->flags & FTRACE_OPS_FL_GLOBAL)
2895 ops = &global_ops;
2896
Steven Rostedt41c52c02008-05-22 11:46:33 -04002897 if (unlikely(ftrace_disabled))
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002898 return -ENODEV;
Steven Rostedt41c52c02008-05-22 11:46:33 -04002899
Steven Rostedtf45948e2011-05-02 12:29:25 -04002900 if (enable)
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002901 orig_hash = &ops->filter_hash;
Steven Rostedtf45948e2011-05-02 12:29:25 -04002902 else
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002903 orig_hash = &ops->notrace_hash;
2904
2905 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
2906 if (!hash)
2907 return -ENOMEM;
Steven Rostedtf45948e2011-05-02 12:29:25 -04002908
Steven Rostedt41c52c02008-05-22 11:46:33 -04002909 mutex_lock(&ftrace_regex_lock);
2910 if (reset)
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04002911 ftrace_filter_reset(hash);
Steven Rostedt41c52c02008-05-22 11:46:33 -04002912 if (buf)
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04002913 ftrace_match_records(hash, buf, len);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002914
2915 mutex_lock(&ftrace_lock);
Steven Rostedt41fb61c2011-07-13 15:03:44 -04002916 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
Steven Rostedt072126f2011-07-13 15:08:31 -04002917 if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
2918 && ftrace_enabled)
2919 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
2920
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002921 mutex_unlock(&ftrace_lock);
2922
Steven Rostedt41c52c02008-05-22 11:46:33 -04002923 mutex_unlock(&ftrace_regex_lock);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002924
2925 free_ftrace_hash(hash);
2926 return ret;
Steven Rostedt41c52c02008-05-22 11:46:33 -04002927}
2928
Steven Rostedt77a2b372008-05-12 21:20:45 +02002929/**
2930 * ftrace_set_filter - set a function to filter on in ftrace
Steven Rostedt936e0742011-05-05 22:54:01 -04002931 * @ops - the ops to set the filter with
Steven Rostedt77a2b372008-05-12 21:20:45 +02002932 * @buf - the string that holds the function filter text.
2933 * @len - the length of the string.
2934 * @reset - non zero to reset all filters before applying this filter.
2935 *
2936 * Filters denote which functions should be enabled when tracing is enabled.
2937 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2938 */
Steven Rostedt936e0742011-05-05 22:54:01 -04002939void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
2940 int len, int reset)
Steven Rostedt77a2b372008-05-12 21:20:45 +02002941{
Steven Rostedt936e0742011-05-05 22:54:01 -04002942 ftrace_set_regex(ops, buf, len, reset, 1);
Steven Rostedt41c52c02008-05-22 11:46:33 -04002943}
Steven Rostedt936e0742011-05-05 22:54:01 -04002944EXPORT_SYMBOL_GPL(ftrace_set_filter);
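/*
 * Usage sketch (illustrative only; my_ops and my_callback are hypothetical):
 * restrict a caller-owned ftrace_ops to a single function before
 * registering it. The callback signature matches what
 * ftrace_ops_list_func() passes to ops->func.
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip);
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 1);
 *	register_ftrace_function(&my_ops);
 */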
Steven Rostedt4eebcc82008-05-12 21:20:48 +02002945
Steven Rostedt41c52c02008-05-22 11:46:33 -04002946/**
2947 * ftrace_set_notrace - set a function to not trace in ftrace
Steven Rostedt936e0742011-05-05 22:54:01 -04002948 * @ops - the ops to set the notrace filter with
Steven Rostedt41c52c02008-05-22 11:46:33 -04002949 * @buf - the string that holds the function notrace text.
2950 * @len - the length of the string.
2951 * @reset - non zero to reset all filters before applying this filter.
2952 *
2953 * Notrace Filters denote which functions should not be enabled when tracing
2954 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2955 * for tracing.
2956 */
Steven Rostedt936e0742011-05-05 22:54:01 -04002957void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
2958 int len, int reset)
2959{
2960 ftrace_set_regex(ops, buf, len, reset, 0);
2961}
2962EXPORT_SYMBOL_GPL(ftrace_set_notrace);
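/*
 * Usage sketch (illustrative, reusing the hypothetical my_ops above):
 * exclude a family of functions from that ops without touching its
 * filter hash. Glob patterns are accepted, as with the filter.
 *
 *	ftrace_set_notrace(&my_ops, "mutex_*", strlen("mutex_*"), 0);
 */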
2963/**
2964 * ftrace_set_filter - set a function to filter on in ftrace
2965 * @ops - the ops to set the filter with
2966 * @buf - the string that holds the function filter text.
2967 * @len - the length of the string.
2968 * @reset - non zero to reset all filters before applying this filter.
2969 *
2970 * Filters denote which functions should be enabled when tracing is enabled.
2971 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2972 */
2973void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
2974{
2975 ftrace_set_regex(&global_ops, buf, len, reset, 1);
2976}
2977EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
2978
2979/**
2980 * ftrace_set_global_notrace - set a function to not trace with global tracers
2981 * @buf - the string that holds the function notrace text.
2982 * @len - the length of the string.
2983 * @reset - non zero to reset all filters before applying this filter.
2984 *
2985 * Notrace Filters denote which functions should not be enabled when tracing
2986 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2987 * for tracing.
2988 * This variant applies to the shared global_ops used by the core tracers.
2989 */
2990void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
Steven Rostedt41c52c02008-05-22 11:46:33 -04002991{
Steven Rostedtf45948e2011-05-02 12:29:25 -04002992 ftrace_set_regex(&global_ops, buf, len, reset, 0);
Steven Rostedt77a2b372008-05-12 21:20:45 +02002993}
Steven Rostedt936e0742011-05-05 22:54:01 -04002994EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
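/*
 * Usage sketch (illustrative only): callers that rely on the shared
 * global_ops (the classic function tracer interface) can shape its
 * filter and notrace hashes directly:
 *
 *	ftrace_set_global_filter("sys_*", strlen("sys_*"), 1);
 *	ftrace_set_global_notrace("schedule", strlen("schedule"), 0);
 */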
Steven Rostedt77a2b372008-05-12 21:20:45 +02002995
Steven Rostedt2af15d62009-05-28 13:37:24 -04002996/*
2997 * command line interface to allow users to set filters on boot up.
2998 */
2999#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
3000static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3001static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3002
3003static int __init set_ftrace_notrace(char *str)
3004{
3005	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3006 return 1;
3007}
3008__setup("ftrace_notrace=", set_ftrace_notrace);
3009
3010static int __init set_ftrace_filter(char *str)
3011{
3012	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3013 return 1;
3014}
3015__setup("ftrace_filter=", set_ftrace_filter);
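/*
 * Example boot command line usage of the two parameters defined above
 * (the function names are purely illustrative). Entries are comma
 * separated and are applied by set_ftrace_early_filters() during
 * ftrace_init():
 *
 *	ftrace_filter=schedule*,vfs_read ftrace_notrace=kmalloc,kfree
 */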
3016
Stefan Assmann369bc182009-10-12 22:17:21 +02003017#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Lai Jiangshanf6060f42009-11-05 11:16:17 +08003018static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
Steven Rostedt801c29f2010-03-05 20:02:19 -05003019static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3020
Stefan Assmann369bc182009-10-12 22:17:21 +02003021static int __init set_graph_function(char *str)
3022{
Frederic Weisbecker06f43d62009-10-14 20:43:39 +02003023 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
Stefan Assmann369bc182009-10-12 22:17:21 +02003024 return 1;
3025}
3026__setup("ftrace_graph_filter=", set_graph_function);
3027
3028static void __init set_ftrace_early_graph(char *buf)
3029{
3030 int ret;
3031 char *func;
3032
3033 while (buf) {
3034 func = strsep(&buf, ",");
3035 /* we allow only one expression at a time */
3036 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3037 func);
3038 if (ret)
3039 printk(KERN_DEBUG "ftrace: function %s not "
3040 "traceable\n", func);
3041 }
3042}
3043#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3044
Steven Rostedtf45948e2011-05-02 12:29:25 -04003045static void __init
3046set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable)
Steven Rostedt2af15d62009-05-28 13:37:24 -04003047{
3048 char *func;
3049
3050 while (buf) {
3051 func = strsep(&buf, ",");
Steven Rostedtf45948e2011-05-02 12:29:25 -04003052 ftrace_set_regex(ops, func, strlen(func), 0, enable);
Steven Rostedt2af15d62009-05-28 13:37:24 -04003053 }
3054}
3055
3056static void __init set_ftrace_early_filters(void)
3057{
3058 if (ftrace_filter_buf[0])
Steven Rostedtf45948e2011-05-02 12:29:25 -04003059 set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1);
Steven Rostedt2af15d62009-05-28 13:37:24 -04003060 if (ftrace_notrace_buf[0])
Steven Rostedtf45948e2011-05-02 12:29:25 -04003061 set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0);
Stefan Assmann369bc182009-10-12 22:17:21 +02003062#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3063 if (ftrace_graph_buf[0])
3064 set_ftrace_early_graph(ftrace_graph_buf);
3065#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
Steven Rostedt2af15d62009-05-28 13:37:24 -04003066}
3067
Ingo Molnare309b412008-05-12 21:20:51 +02003068static int
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003069ftrace_regex_release(struct inode *inode, struct file *file)
Steven Rostedt5072c592008-05-12 21:20:43 +02003070{
3071 struct seq_file *m = (struct seq_file *)file->private_data;
3072 struct ftrace_iterator *iter;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003073 struct ftrace_hash **orig_hash;
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003074 struct trace_parser *parser;
Steven Rostedted926f92011-05-03 13:25:24 -04003075 int filter_hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003076 int ret;
Steven Rostedt5072c592008-05-12 21:20:43 +02003077
Steven Rostedt41c52c02008-05-22 11:46:33 -04003078 mutex_lock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02003079 if (file->f_mode & FMODE_READ) {
3080 iter = m->private;
3081
3082 seq_release(inode, file);
3083 } else
3084 iter = file->private_data;
3085
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003086 parser = &iter->parser;
3087 if (trace_parser_loaded(parser)) {
3088 parser->buffer[parser->idx] = 0;
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003089 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
Steven Rostedt5072c592008-05-12 21:20:43 +02003090 }
3091
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003092 trace_parser_put(parser);
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003093
Steven Rostedt058e2972011-04-29 22:35:33 -04003094 if (file->f_mode & FMODE_WRITE) {
Steven Rostedted926f92011-05-03 13:25:24 -04003095 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3096
3097 if (filter_hash)
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003098 orig_hash = &iter->ops->filter_hash;
Steven Rostedted926f92011-05-03 13:25:24 -04003099 else
3100 orig_hash = &iter->ops->notrace_hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003101
Steven Rostedt058e2972011-04-29 22:35:33 -04003102 mutex_lock(&ftrace_lock);
Steven Rostedt41fb61c2011-07-13 15:03:44 -04003103 ret = ftrace_hash_move(iter->ops, filter_hash,
3104 orig_hash, iter->hash);
3105 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3106 && ftrace_enabled)
3107 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
3108
Steven Rostedt058e2972011-04-29 22:35:33 -04003109 mutex_unlock(&ftrace_lock);
3110 }
Steven Rostedt33dc9b12011-05-02 17:34:47 -04003111 free_ftrace_hash(iter->hash);
3112 kfree(iter);
Steven Rostedt058e2972011-04-29 22:35:33 -04003113
Steven Rostedt41c52c02008-05-22 11:46:33 -04003114 mutex_unlock(&ftrace_regex_lock);
Steven Rostedt5072c592008-05-12 21:20:43 +02003115 return 0;
3116}
3117
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003118static const struct file_operations ftrace_avail_fops = {
Steven Rostedt5072c592008-05-12 21:20:43 +02003119 .open = ftrace_avail_open,
3120 .read = seq_read,
3121 .llseek = seq_lseek,
Li Zefan3be04b42009-08-17 16:54:03 +08003122 .release = seq_release_private,
Steven Rostedt5072c592008-05-12 21:20:43 +02003123};
3124
Steven Rostedt647bcd02011-05-03 14:39:21 -04003125static const struct file_operations ftrace_enabled_fops = {
3126 .open = ftrace_enabled_open,
3127 .read = seq_read,
3128 .llseek = seq_lseek,
3129 .release = seq_release_private,
3130};
3131
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003132static const struct file_operations ftrace_filter_fops = {
Steven Rostedt5072c592008-05-12 21:20:43 +02003133 .open = ftrace_filter_open,
Lai Jiangshan850a80c2009-03-13 17:47:23 +08003134 .read = seq_read,
Steven Rostedt5072c592008-05-12 21:20:43 +02003135 .write = ftrace_filter_write,
Steven Rostedt98c4fd02010-09-10 11:47:43 -04003136 .llseek = ftrace_regex_lseek,
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003137 .release = ftrace_regex_release,
Steven Rostedt5072c592008-05-12 21:20:43 +02003138};
3139
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003140static const struct file_operations ftrace_notrace_fops = {
Steven Rostedt41c52c02008-05-22 11:46:33 -04003141 .open = ftrace_notrace_open,
Lai Jiangshan850a80c2009-03-13 17:47:23 +08003142 .read = seq_read,
Steven Rostedt41c52c02008-05-22 11:46:33 -04003143 .write = ftrace_notrace_write,
3144 .llseek = ftrace_regex_lseek,
Steven Rostedt1cf41dd72011-04-29 20:59:51 -04003145 .release = ftrace_regex_release,
Steven Rostedt41c52c02008-05-22 11:46:33 -04003146};
3147
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003148#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3149
3150static DEFINE_MUTEX(graph_lock);
3151
3152int ftrace_graph_count;
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003153int ftrace_graph_filter_enabled;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003154unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3155
3156static void *
Li Zefan85951842009-06-24 09:54:00 +08003157__g_next(struct seq_file *m, loff_t *pos)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003158{
Li Zefan85951842009-06-24 09:54:00 +08003159 if (*pos >= ftrace_graph_count)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003160 return NULL;
Li Zefana4ec5e02009-09-18 14:06:28 +08003161 return &ftrace_graph_funcs[*pos];
Li Zefan85951842009-06-24 09:54:00 +08003162}
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003163
Li Zefan85951842009-06-24 09:54:00 +08003164static void *
3165g_next(struct seq_file *m, void *v, loff_t *pos)
3166{
3167 (*pos)++;
3168 return __g_next(m, pos);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003169}
3170
3171static void *g_start(struct seq_file *m, loff_t *pos)
3172{
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003173 mutex_lock(&graph_lock);
3174
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003175	/* Nothing is filtered; tell g_show to print that all functions are enabled */
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003176 if (!ftrace_graph_filter_enabled && !*pos)
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003177 return (void *)1;
3178
Li Zefan85951842009-06-24 09:54:00 +08003179 return __g_next(m, pos);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003180}
3181
3182static void g_stop(struct seq_file *m, void *p)
3183{
3184 mutex_unlock(&graph_lock);
3185}
3186
3187static int g_show(struct seq_file *m, void *v)
3188{
3189 unsigned long *ptr = v;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003190
3191 if (!ptr)
3192 return 0;
3193
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003194 if (ptr == (unsigned long *)1) {
3195 seq_printf(m, "#### all functions enabled ####\n");
3196 return 0;
3197 }
3198
Steven Rostedtb375a112009-09-17 00:05:58 -04003199 seq_printf(m, "%ps\n", (void *)*ptr);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003200
3201 return 0;
3202}
3203
James Morris88e9d342009-09-22 16:43:43 -07003204static const struct seq_operations ftrace_graph_seq_ops = {
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003205 .start = g_start,
3206 .next = g_next,
3207 .stop = g_stop,
3208 .show = g_show,
3209};
3210
3211static int
3212ftrace_graph_open(struct inode *inode, struct file *file)
3213{
3214 int ret = 0;
3215
3216 if (unlikely(ftrace_disabled))
3217 return -ENODEV;
3218
3219 mutex_lock(&graph_lock);
3220 if ((file->f_mode & FMODE_WRITE) &&
Steven Rostedt8650ae32009-07-22 23:29:30 -04003221 (file->f_flags & O_TRUNC)) {
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003222 ftrace_graph_filter_enabled = 0;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003223 ftrace_graph_count = 0;
3224 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3225 }
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003226 mutex_unlock(&graph_lock);
3227
Li Zefana4ec5e02009-09-18 14:06:28 +08003228 if (file->f_mode & FMODE_READ)
3229 ret = seq_open(file, &ftrace_graph_seq_ops);
3230
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003231 return ret;
3232}
3233
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003234static int
Li Zefan87827112009-07-23 11:29:11 +08003235ftrace_graph_release(struct inode *inode, struct file *file)
3236{
3237 if (file->f_mode & FMODE_READ)
3238 seq_release(inode, file);
3239 return 0;
3240}
3241
3242static int
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003243ftrace_set_func(unsigned long *array, int *idx, char *buffer)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003244{
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003245 struct dyn_ftrace *rec;
3246 struct ftrace_page *pg;
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003247 int search_len;
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003248 int fail = 1;
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003249 int type, not;
3250 char *search;
3251 bool exists;
3252 int i;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003253
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003254 /* decode regex */
Frederic Weisbecker3f6fe062009-09-24 21:31:51 +02003255 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003256 if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3257 return -EBUSY;
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003258
3259 search_len = strlen(search);
3260
Steven Rostedt52baf112009-02-14 01:15:39 -05003261 mutex_lock(&ftrace_lock);
Steven Rostedt45a4a232011-04-21 23:16:46 -04003262
3263 if (unlikely(ftrace_disabled)) {
3264 mutex_unlock(&ftrace_lock);
3265 return -ENODEV;
3266 }
3267
Steven Rostedt265c8312009-02-13 12:43:56 -05003268 do_for_each_ftrace_rec(pg, rec) {
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003269
Steven Rostedt45a4a232011-04-21 23:16:46 -04003270 if (rec->flags & FTRACE_FL_FREE)
Steven Rostedt265c8312009-02-13 12:43:56 -05003271 continue;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003272
Steven Rostedtb9df92d2011-04-28 20:32:08 -04003273 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003274 /* if it is in the array */
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003275 exists = false;
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003276 for (i = 0; i < *idx; i++) {
Frederic Weisbeckerf9349a82009-02-19 21:13:12 +01003277 if (array[i] == rec->ip) {
3278 exists = true;
Steven Rostedt265c8312009-02-13 12:43:56 -05003279 break;
3280 }
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003281 }
3282
3283 if (!not) {
3284 fail = 0;
3285 if (!exists) {
3286 array[(*idx)++] = rec->ip;
3287 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3288 goto out;
3289 }
3290 } else {
3291 if (exists) {
3292 array[i] = array[--(*idx)];
3293 array[*idx] = 0;
3294 fail = 0;
3295 }
3296 }
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003297 }
Steven Rostedt265c8312009-02-13 12:43:56 -05003298 } while_for_each_ftrace_rec();
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003299out:
Steven Rostedt52baf112009-02-14 01:15:39 -05003300 mutex_unlock(&ftrace_lock);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003301
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003302 if (fail)
3303 return -EINVAL;
3304
3305 ftrace_graph_filter_enabled = 1;
3306 return 0;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003307}
3308
3309static ssize_t
3310ftrace_graph_write(struct file *file, const char __user *ubuf,
3311 size_t cnt, loff_t *ppos)
3312{
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003313 struct trace_parser parser;
Li Zefan4ba79782009-09-22 13:52:20 +08003314 ssize_t read, ret;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003315
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003316 if (!cnt)
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003317 return 0;
3318
3319 mutex_lock(&graph_lock);
3320
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003321 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3322 ret = -ENOMEM;
Li Zefan1eb90f12009-09-22 13:52:57 +08003323 goto out_unlock;
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003324 }
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003325
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003326 read = trace_get_user(&parser, ubuf, cnt, ppos);
3327
Li Zefan4ba79782009-09-22 13:52:20 +08003328 if (read >= 0 && trace_parser_loaded((&parser))) {
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003329 parser.buffer[parser.idx] = 0;
3330
3331 /* we allow only one expression at a time */
Li Zefana4ec5e02009-09-18 14:06:28 +08003332 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003333 parser.buffer);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003334 if (ret)
Li Zefan1eb90f12009-09-22 13:52:57 +08003335 goto out_free;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003336 }
3337
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003338 ret = read;
Li Zefan1eb90f12009-09-22 13:52:57 +08003339
3340out_free:
jolsa@redhat.com689fd8b2009-09-11 17:29:29 +02003341 trace_parser_put(&parser);
Li Zefan1eb90f12009-09-22 13:52:57 +08003342out_unlock:
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003343 mutex_unlock(&graph_lock);
3344
3345 return ret;
3346}
3347
3348static const struct file_operations ftrace_graph_fops = {
Li Zefan87827112009-07-23 11:29:11 +08003349 .open = ftrace_graph_open,
3350 .read = seq_read,
3351 .write = ftrace_graph_write,
3352 .release = ftrace_graph_release,
Arnd Bergmann6038f372010-08-15 18:52:59 +02003353 .llseek = seq_lseek,
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003354};
3355#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3356
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003357static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
Steven Rostedt5072c592008-05-12 21:20:43 +02003358{
Steven Rostedt5072c592008-05-12 21:20:43 +02003359
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003360 trace_create_file("available_filter_functions", 0444,
3361 d_tracer, NULL, &ftrace_avail_fops);
Steven Rostedt5072c592008-05-12 21:20:43 +02003362
Steven Rostedt647bcd02011-05-03 14:39:21 -04003363 trace_create_file("enabled_functions", 0444,
3364 d_tracer, NULL, &ftrace_enabled_fops);
3365
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003366 trace_create_file("set_ftrace_filter", 0644, d_tracer,
3367 NULL, &ftrace_filter_fops);
Steven Rostedt41c52c02008-05-22 11:46:33 -04003368
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003369 trace_create_file("set_ftrace_notrace", 0644, d_tracer,
Steven Rostedt41c52c02008-05-22 11:46:33 -04003370 NULL, &ftrace_notrace_fops);
Steven Rostedtad90c0e2008-05-27 20:48:37 -04003371
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003372#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003373 trace_create_file("set_graph_function", 0444, d_tracer,
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003374 NULL,
3375 &ftrace_graph_fops);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003376#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3377
Steven Rostedt5072c592008-05-12 21:20:43 +02003378 return 0;
3379}
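/*
 * The files created above appear in the tracing directory of debugfs
 * (typically /sys/kernel/debug/tracing). A minimal session, shown only
 * as an illustration:
 *
 *	cat available_filter_functions
 *	echo 'sys_open' > set_ftrace_filter
 *	echo 'kfree' > set_ftrace_notrace
 *	cat enabled_functions
 */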
3380
Jiri Olsa5cb084b2009-10-13 16:33:53 -04003381static int ftrace_process_locs(struct module *mod,
Steven Rostedt31e88902008-11-14 16:21:19 -08003382 unsigned long *start,
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003383 unsigned long *end)
3384{
3385 unsigned long *p;
3386 unsigned long addr;
Steven Rostedt4376cac2011-06-24 23:28:13 -04003387 unsigned long flags = 0; /* Shut up gcc */
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003388
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003389 mutex_lock(&ftrace_lock);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003390 p = start;
3391 while (p < end) {
3392 addr = ftrace_call_adjust(*p++);
Steven Rostedt20e52272008-11-14 16:21:19 -08003393 /*
3394 * Some architecture linkers will pad between
3395 * the different mcount_loc sections of different
3396 * object files to satisfy alignments.
3397 * Skip any NULL pointers.
3398 */
3399 if (!addr)
3400 continue;
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003401 ftrace_record_ip(addr);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003402 }
3403
Steven Rostedta4f18ed2011-06-07 09:26:46 -04003404 /*
Steven Rostedt4376cac2011-06-24 23:28:13 -04003405 * We only need to disable interrupts on start up
3406 * because we are modifying code that an interrupt
3407 * may execute, and the modification is not atomic.
3408 * But for modules, nothing runs the code we modify
3409 * until we are finished with it, and there's no
3410 * reason to cause large interrupt latencies while we do it.
Steven Rostedta4f18ed2011-06-07 09:26:46 -04003411 */
Steven Rostedt4376cac2011-06-24 23:28:13 -04003412 if (!mod)
3413 local_irq_save(flags);
Steven Rostedt31e88902008-11-14 16:21:19 -08003414 ftrace_update_code(mod);
Steven Rostedt4376cac2011-06-24 23:28:13 -04003415 if (!mod)
3416 local_irq_restore(flags);
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003417 mutex_unlock(&ftrace_lock);
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003418
3419 return 0;
3420}
3421
Steven Rostedt93eb6772009-04-15 13:24:06 -04003422#ifdef CONFIG_MODULES
jolsa@redhat.come7247a12009-10-07 19:00:35 +02003423void ftrace_release_mod(struct module *mod)
Steven Rostedt93eb6772009-04-15 13:24:06 -04003424{
3425 struct dyn_ftrace *rec;
3426 struct ftrace_page *pg;
Steven Rostedt93eb6772009-04-15 13:24:06 -04003427
Steven Rostedt93eb6772009-04-15 13:24:06 -04003428 mutex_lock(&ftrace_lock);
Steven Rostedt45a4a232011-04-21 23:16:46 -04003429
3430 if (ftrace_disabled)
3431 goto out_unlock;
3432
Steven Rostedt93eb6772009-04-15 13:24:06 -04003433 do_for_each_ftrace_rec(pg, rec) {
jolsa@redhat.come7247a12009-10-07 19:00:35 +02003434 if (within_module_core(rec->ip, mod)) {
Steven Rostedt93eb6772009-04-15 13:24:06 -04003435 /*
3436 * rec->ip is changed in ftrace_free_rec().
3437 * It should not be within the module's range if the record was freed.
3438 */
3439 FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
3440 ftrace_free_rec(rec);
3441 }
3442 } while_for_each_ftrace_rec();
Steven Rostedt45a4a232011-04-21 23:16:46 -04003443 out_unlock:
Steven Rostedt93eb6772009-04-15 13:24:06 -04003444 mutex_unlock(&ftrace_lock);
3445}
3446
3447static void ftrace_init_module(struct module *mod,
3448 unsigned long *start, unsigned long *end)
Steven Rostedt90d595f2008-08-14 15:45:09 -04003449{
Steven Rostedt00fd61a2008-08-15 21:40:04 -04003450 if (ftrace_disabled || start == end)
Steven Rostedtfed19392008-08-14 22:47:19 -04003451 return;
Jiri Olsa5cb084b2009-10-13 16:33:53 -04003452 ftrace_process_locs(mod, start, end);
Steven Rostedt90d595f2008-08-14 15:45:09 -04003453}
3454
Steven Rostedt93eb6772009-04-15 13:24:06 -04003455static int ftrace_module_notify(struct notifier_block *self,
3456 unsigned long val, void *data)
3457{
3458 struct module *mod = data;
3459
3460 switch (val) {
3461 case MODULE_STATE_COMING:
3462 ftrace_init_module(mod, mod->ftrace_callsites,
3463 mod->ftrace_callsites +
3464 mod->num_ftrace_callsites);
3465 break;
3466 case MODULE_STATE_GOING:
jolsa@redhat.come7247a12009-10-07 19:00:35 +02003467 ftrace_release_mod(mod);
Steven Rostedt93eb6772009-04-15 13:24:06 -04003468 break;
3469 }
3470
3471 return 0;
3472}
3473#else
3474static int ftrace_module_notify(struct notifier_block *self,
3475 unsigned long val, void *data)
3476{
3477 return 0;
3478}
3479#endif /* CONFIG_MODULES */
3480
3481struct notifier_block ftrace_module_nb = {
3482 .notifier_call = ftrace_module_notify,
3483 .priority = 0,
3484};
3485
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003486extern unsigned long __start_mcount_loc[];
3487extern unsigned long __stop_mcount_loc[];
3488
3489void __init ftrace_init(void)
3490{
3491 unsigned long count, addr, flags;
3492 int ret;
3493
3494 /* Keep the ftrace pointer to the stub */
3495 addr = (unsigned long)ftrace_stub;
3496
3497 local_irq_save(flags);
3498 ftrace_dyn_arch_init(&addr);
3499 local_irq_restore(flags);
3500
3501 /* ftrace_dyn_arch_init places the return code in addr */
3502 if (addr)
3503 goto failed;
3504
3505 count = __stop_mcount_loc - __start_mcount_loc;
3506
3507 ret = ftrace_dyn_table_alloc(count);
3508 if (ret)
3509 goto failed;
3510
3511 last_ftrace_enabled = ftrace_enabled = 1;
3512
Jiri Olsa5cb084b2009-10-13 16:33:53 -04003513 ret = ftrace_process_locs(NULL,
Steven Rostedt31e88902008-11-14 16:21:19 -08003514 __start_mcount_loc,
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003515 __stop_mcount_loc);
3516
Steven Rostedt93eb6772009-04-15 13:24:06 -04003517 ret = register_module_notifier(&ftrace_module_nb);
Ming Lei24ed0c42009-05-17 15:31:38 +08003518 if (ret)
Steven Rostedt93eb6772009-04-15 13:24:06 -04003519 pr_warning("Failed to register trace ftrace module notifier\n");
3520
Steven Rostedt2af15d62009-05-28 13:37:24 -04003521 set_ftrace_early_filters();
3522
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003523 return;
3524 failed:
3525 ftrace_disabled = 1;
3526}
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003527
Steven Rostedt3d083392008-05-12 21:20:42 +02003528#else
Frederic Weisbecker0b6e4d52008-10-28 20:17:38 +01003529
Steven Rostedt2b499382011-05-03 22:49:52 -04003530static struct ftrace_ops global_ops = {
Steven Rostedtbd69c302011-05-03 21:55:54 -04003531 .func = ftrace_stub,
3532};
3533
Frederic Weisbecker0b6e4d52008-10-28 20:17:38 +01003534static int __init ftrace_nodyn_init(void)
3535{
3536 ftrace_enabled = 1;
3537 return 0;
3538}
3539device_initcall(ftrace_nodyn_init);
3540
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003541static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
3542static inline void ftrace_startup_enable(int command) { }
Steven Rostedt5a45cfe2008-11-26 00:16:24 -05003543/* Keep as macros so we do not need to define the commands */
Steven Rostedt3b6cfdb2011-05-23 15:33:49 -04003544# define ftrace_startup(ops, command) \
3545 ({ \
3546 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
3547 0; \
3548 })
Steven Rostedtbd69c302011-05-03 21:55:54 -04003549# define ftrace_shutdown(ops, command) do { } while (0)
Ingo Molnarc7aafc52008-05-12 21:20:45 +02003550# define ftrace_startup_sysctl() do { } while (0)
3551# define ftrace_shutdown_sysctl() do { } while (0)
Steven Rostedtb8489142011-05-04 09:27:52 -04003552
3553static inline int
3554ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
3555{
3556 return 1;
3557}
3558
Steven Rostedt3d083392008-05-12 21:20:42 +02003559#endif /* CONFIG_DYNAMIC_FTRACE */
3560
Steven Rostedtb8489142011-05-04 09:27:52 -04003561static void
3562ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
3563{
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04003564 struct ftrace_ops *op;
Steven Rostedtb8489142011-05-04 09:27:52 -04003565
Steven Rostedtb1cff0a2011-05-25 14:27:43 -04003566 if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
3567 return;
3568
3569 trace_recursion_set(TRACE_INTERNAL_BIT);
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04003570 /*
3571 * Some of the ops may be dynamically allocated;
3572 * they must be freed after a synchronize_sched().
3573 */
3574 preempt_disable_notrace();
3575 op = rcu_dereference_raw(ftrace_ops_list);
Steven Rostedtb8489142011-05-04 09:27:52 -04003576 while (op != &ftrace_list_end) {
3577 if (ftrace_ops_test(op, ip))
3578 op->func(ip, parent_ip);
3579 op = rcu_dereference_raw(op->next);
3580	}
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04003581 preempt_enable_notrace();
Steven Rostedtb1cff0a2011-05-25 14:27:43 -04003582 trace_recursion_clear(TRACE_INTERNAL_BIT);
Steven Rostedtb8489142011-05-04 09:27:52 -04003583}
3584
Steven Rostedte32d8952008-12-04 00:26:41 -05003585static void clear_ftrace_swapper(void)
3586{
3587 struct task_struct *p;
3588 int cpu;
3589
3590 get_online_cpus();
3591 for_each_online_cpu(cpu) {
3592 p = idle_task(cpu);
3593 clear_tsk_trace_trace(p);
3594 }
3595 put_online_cpus();
3596}
3597
3598static void set_ftrace_swapper(void)
3599{
3600 struct task_struct *p;
3601 int cpu;
3602
3603 get_online_cpus();
3604 for_each_online_cpu(cpu) {
3605 p = idle_task(cpu);
3606 set_tsk_trace_trace(p);
3607 }
3608 put_online_cpus();
3609}
3610
3611static void clear_ftrace_pid(struct pid *pid)
Steven Rostedt978f3a42008-12-04 00:26:40 -05003612{
3613 struct task_struct *p;
3614
Oleg Nesterov229c4ef2009-02-03 20:39:04 +01003615 rcu_read_lock();
Steven Rostedte32d8952008-12-04 00:26:41 -05003616 do_each_pid_task(pid, PIDTYPE_PID, p) {
Steven Rostedt978f3a42008-12-04 00:26:40 -05003617 clear_tsk_trace_trace(p);
Steven Rostedte32d8952008-12-04 00:26:41 -05003618 } while_each_pid_task(pid, PIDTYPE_PID, p);
Oleg Nesterov229c4ef2009-02-03 20:39:04 +01003619 rcu_read_unlock();
3620
Steven Rostedte32d8952008-12-04 00:26:41 -05003621 put_pid(pid);
Steven Rostedt978f3a42008-12-04 00:26:40 -05003622}
3623
Steven Rostedte32d8952008-12-04 00:26:41 -05003624static void set_ftrace_pid(struct pid *pid)
Steven Rostedt978f3a42008-12-04 00:26:40 -05003625{
3626 struct task_struct *p;
3627
Oleg Nesterov229c4ef2009-02-03 20:39:04 +01003628 rcu_read_lock();
Steven Rostedt978f3a42008-12-04 00:26:40 -05003629 do_each_pid_task(pid, PIDTYPE_PID, p) {
3630 set_tsk_trace_trace(p);
3631 } while_each_pid_task(pid, PIDTYPE_PID, p);
Oleg Nesterov229c4ef2009-02-03 20:39:04 +01003632 rcu_read_unlock();
Steven Rostedt978f3a42008-12-04 00:26:40 -05003633}
3634
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003635static void clear_ftrace_pid_task(struct pid *pid)
Steven Rostedte32d8952008-12-04 00:26:41 -05003636{
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003637 if (pid == ftrace_swapper_pid)
Steven Rostedte32d8952008-12-04 00:26:41 -05003638 clear_ftrace_swapper();
3639 else
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003640 clear_ftrace_pid(pid);
Steven Rostedte32d8952008-12-04 00:26:41 -05003641}
3642
3643static void set_ftrace_pid_task(struct pid *pid)
3644{
3645 if (pid == ftrace_swapper_pid)
3646 set_ftrace_swapper();
3647 else
3648 set_ftrace_pid(pid);
3649}
3650
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003651static int ftrace_pid_add(int p)
3652{
3653 struct pid *pid;
3654 struct ftrace_pid *fpid;
3655 int ret = -EINVAL;
3656
3657 mutex_lock(&ftrace_lock);
3658
3659 if (!p)
3660 pid = ftrace_swapper_pid;
3661 else
3662 pid = find_get_pid(p);
3663
3664 if (!pid)
3665 goto out;
3666
3667 ret = 0;
3668
3669 list_for_each_entry(fpid, &ftrace_pids, list)
3670 if (fpid->pid == pid)
3671 goto out_put;
3672
3673 ret = -ENOMEM;
3674
3675 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
3676 if (!fpid)
3677 goto out_put;
3678
3679 list_add(&fpid->list, &ftrace_pids);
3680 fpid->pid = pid;
3681
3682 set_ftrace_pid_task(pid);
3683
3684 ftrace_update_pid_func();
3685 ftrace_startup_enable(0);
3686
3687 mutex_unlock(&ftrace_lock);
3688 return 0;
3689
3690out_put:
3691 if (pid != ftrace_swapper_pid)
3692 put_pid(pid);
3693
3694out:
3695 mutex_unlock(&ftrace_lock);
3696 return ret;
3697}
3698
3699static void ftrace_pid_reset(void)
3700{
3701 struct ftrace_pid *fpid, *safe;
3702
3703 mutex_lock(&ftrace_lock);
3704 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
3705 struct pid *pid = fpid->pid;
3706
3707 clear_ftrace_pid_task(pid);
3708
3709 list_del(&fpid->list);
3710 kfree(fpid);
3711 }
3712
3713 ftrace_update_pid_func();
3714 ftrace_startup_enable(0);
3715
3716 mutex_unlock(&ftrace_lock);
3717}
3718
3719static void *fpid_start(struct seq_file *m, loff_t *pos)
3720{
3721 mutex_lock(&ftrace_lock);
3722
3723 if (list_empty(&ftrace_pids) && (!*pos))
3724 return (void *) 1;
3725
3726 return seq_list_start(&ftrace_pids, *pos);
3727}
3728
3729static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
3730{
3731 if (v == (void *)1)
3732 return NULL;
3733
3734 return seq_list_next(v, &ftrace_pids, pos);
3735}
3736
3737static void fpid_stop(struct seq_file *m, void *p)
3738{
3739 mutex_unlock(&ftrace_lock);
3740}
3741
3742static int fpid_show(struct seq_file *m, void *v)
3743{
3744 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
3745
3746 if (v == (void *)1) {
3747 seq_printf(m, "no pid\n");
3748 return 0;
3749 }
3750
3751 if (fpid->pid == ftrace_swapper_pid)
3752 seq_printf(m, "swapper tasks\n");
3753 else
3754 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
3755
3756 return 0;
3757}
3758
3759static const struct seq_operations ftrace_pid_sops = {
3760 .start = fpid_start,
3761 .next = fpid_next,
3762 .stop = fpid_stop,
3763 .show = fpid_show,
3764};
3765
3766static int
3767ftrace_pid_open(struct inode *inode, struct file *file)
3768{
3769 int ret = 0;
3770
3771 if ((file->f_mode & FMODE_WRITE) &&
3772 (file->f_flags & O_TRUNC))
3773 ftrace_pid_reset();
3774
3775 if (file->f_mode & FMODE_READ)
3776 ret = seq_open(file, &ftrace_pid_sops);
3777
3778 return ret;
3779}
3780
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003781static ssize_t
3782ftrace_pid_write(struct file *filp, const char __user *ubuf,
3783 size_t cnt, loff_t *ppos)
3784{
Ingo Molnar457dc922009-11-23 11:03:28 +01003785 char buf[64], *tmp;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003786 long val;
3787 int ret;
3788
3789 if (cnt >= sizeof(buf))
3790 return -EINVAL;
3791
3792 if (copy_from_user(&buf, ubuf, cnt))
3793 return -EFAULT;
3794
3795 buf[cnt] = 0;
3796
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003797 /*
3798 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3799 * to clean the filter quietly.
3800 */
Ingo Molnar457dc922009-11-23 11:03:28 +01003801 tmp = strstrip(buf);
3802 if (strlen(tmp) == 0)
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003803 return 1;
3804
Ingo Molnar457dc922009-11-23 11:03:28 +01003805 ret = strict_strtol(tmp, 10, &val);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003806 if (ret < 0)
3807 return ret;
3808
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003809 ret = ftrace_pid_add(val);
Steven Rostedt978f3a42008-12-04 00:26:40 -05003810
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003811 return ret ? ret : cnt;
3812}
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003813
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003814static int
3815ftrace_pid_release(struct inode *inode, struct file *file)
3816{
3817 if (file->f_mode & FMODE_READ)
3818 seq_release(inode, file);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003819
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003820 return 0;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003821}
3822
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003823static const struct file_operations ftrace_pid_fops = {
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04003824 .open = ftrace_pid_open,
3825 .write = ftrace_pid_write,
3826 .read = seq_read,
3827 .llseek = seq_lseek,
3828 .release = ftrace_pid_release,
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003829};
3830
3831static __init int ftrace_init_debugfs(void)
3832{
3833 struct dentry *d_tracer;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003834
3835 d_tracer = tracing_init_dentry();
3836 if (!d_tracer)
3837 return 0;
3838
3839 ftrace_init_dyn_debugfs(d_tracer);
3840
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003841 trace_create_file("set_ftrace_pid", 0644, d_tracer,
3842 NULL, &ftrace_pid_fops);
Steven Rostedt493762f2009-03-23 17:12:36 -04003843
3844 ftrace_profile_debugfs(d_tracer);
3845
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003846 return 0;
3847}
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003848fs_initcall(ftrace_init_debugfs);
3849
Steven Rostedt3d083392008-05-12 21:20:42 +02003850/**
Steven Rostedt81adbdc2008-10-23 09:33:02 -04003851 * ftrace_kill - kill ftrace
Steven Rostedta2bb6a32008-07-10 20:58:15 -04003852 *
3853 * This function should be used by panic code. It stops ftrace
3854 * but in a not so nice way. If you need to simply stop tracing
3855 * from a non-atomic section, unregister the ftrace_ops instead.
3856 */
Steven Rostedt81adbdc2008-10-23 09:33:02 -04003857void ftrace_kill(void)
Steven Rostedta2bb6a32008-07-10 20:58:15 -04003858{
3859 ftrace_disabled = 1;
3860 ftrace_enabled = 0;
Steven Rostedta2bb6a32008-07-10 20:58:15 -04003861 clear_ftrace_function();
3862}
3863
3864/**
Steven Rostedte0a413f2011-09-29 21:26:16 -04003865 * ftrace_is_dead - Test if ftrace is dead or not.
3866 */
3867int ftrace_is_dead(void)
3868{
3869 return ftrace_disabled;
3870}
3871
3872/**
Steven Rostedt3d083392008-05-12 21:20:42 +02003873 * register_ftrace_function - register a function for profiling
3874 * @ops - ops structure that holds the function for profiling.
3875 *
3876 * Register a function to be called by all functions in the
3877 * kernel.
3878 *
3879 * Note: @ops->func and all the functions it calls must be labeled
3880 * with "notrace", otherwise it will go into a
3881 * recursive loop.
3882 */
3883int register_ftrace_function(struct ftrace_ops *ops)
3884{
Steven Rostedt45a4a232011-04-21 23:16:46 -04003885 int ret = -1;
Steven Rostedt4eebcc82008-05-12 21:20:48 +02003886
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003887 mutex_lock(&ftrace_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01003888
Steven Rostedt45a4a232011-04-21 23:16:46 -04003889 if (unlikely(ftrace_disabled))
3890 goto out_unlock;
3891
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003892 ret = __register_ftrace_function(ops);
Steven Rostedtb8489142011-05-04 09:27:52 -04003893 if (!ret)
Steven Rostedta1cd6172011-05-23 15:24:25 -04003894 ret = ftrace_startup(ops, 0);
Steven Rostedtb8489142011-05-04 09:27:52 -04003895
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003896
Steven Rostedt45a4a232011-04-21 23:16:46 -04003897 out_unlock:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003898 mutex_unlock(&ftrace_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003899 return ret;
Steven Rostedt3d083392008-05-12 21:20:42 +02003900}
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04003901EXPORT_SYMBOL_GPL(register_ftrace_function);
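/*
 * Usage sketch (illustrative only; my_trace_func and my_trace_ops are
 * hypothetical). As the comment above notes, the callback and everything
 * it calls must be notrace to avoid recursion:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *	}
 *
 *	static struct ftrace_ops my_trace_ops = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_trace_ops);
 *
 * and, when finished:
 *
 *	unregister_ftrace_function(&my_trace_ops);
 */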
Steven Rostedt3d083392008-05-12 21:20:42 +02003902
3903/**
Uwe Kleine-Koenig32632922009-01-12 23:35:50 +01003904 * unregister_ftrace_function - unregister a function for profiling.
Steven Rostedt3d083392008-05-12 21:20:42 +02003905 * @ops - ops structure that holds the function to unregister
3906 *
3907 * Unregister a function that was added to be called by ftrace profiling.
3908 */
3909int unregister_ftrace_function(struct ftrace_ops *ops)
3910{
3911 int ret;
3912
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003913 mutex_lock(&ftrace_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02003914 ret = __unregister_ftrace_function(ops);
Steven Rostedtb8489142011-05-04 09:27:52 -04003915 if (!ret)
3916 ftrace_shutdown(ops, 0);
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003917 mutex_unlock(&ftrace_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003918
3919 return ret;
3920}
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04003921EXPORT_SYMBOL_GPL(unregister_ftrace_function);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003922
Ingo Molnare309b412008-05-12 21:20:51 +02003923int
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003924ftrace_enable_sysctl(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07003925 void __user *buffer, size_t *lenp,
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003926 loff_t *ppos)
3927{
Steven Rostedt45a4a232011-04-21 23:16:46 -04003928 int ret = -ENODEV;
Steven Rostedt4eebcc82008-05-12 21:20:48 +02003929
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003930 mutex_lock(&ftrace_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003931
Steven Rostedt45a4a232011-04-21 23:16:46 -04003932 if (unlikely(ftrace_disabled))
3933 goto out;
3934
3935 ret = proc_dointvec(table, write, buffer, lenp, ppos);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003936
Li Zefana32c7762009-06-26 16:55:51 +08003937 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003938 goto out;
3939
Li Zefana32c7762009-06-26 16:55:51 +08003940 last_ftrace_enabled = !!ftrace_enabled;
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003941
3942 if (ftrace_enabled) {
3943
3944 ftrace_startup_sysctl();
3945
3946 /* we are starting ftrace again */
Steven Rostedtb8489142011-05-04 09:27:52 -04003947 if (ftrace_ops_list != &ftrace_list_end) {
3948 if (ftrace_ops_list->next == &ftrace_list_end)
3949 ftrace_trace_function = ftrace_ops_list->func;
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003950 else
Steven Rostedtb8489142011-05-04 09:27:52 -04003951 ftrace_trace_function = ftrace_ops_list_func;
Steven Rostedtb0fc4942008-05-12 21:20:43 +02003952 }
3953
3954 } else {
3955 /* stopping ftrace calls (just send to ftrace_stub) */
3956 ftrace_trace_function = ftrace_stub;
3957
3958 ftrace_shutdown_sysctl();
3959 }
3960
3961 out:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05003962 mutex_unlock(&ftrace_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02003963 return ret;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +02003964}
Ingo Molnarf17845e2008-10-24 12:47:10 +02003965
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01003966#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01003967
Steven Rostedt597af812009-04-03 15:24:12 -04003968static int ftrace_graph_active;
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -08003969static struct notifier_block ftrace_suspend_notifier;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003970
Steven Rostedte49dc192008-12-02 23:50:05 -05003971int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3972{
3973 return 0;
3974}
3975
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01003976/* The callbacks that hook a function */
3977trace_func_graph_ret_t ftrace_graph_return =
3978 (trace_func_graph_ret_t)ftrace_stub;
Steven Rostedte49dc192008-12-02 23:50:05 -05003979trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01003980
3981/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
3982static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3983{
3984 int i;
3985 int ret = 0;
3986 unsigned long flags;
3987 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3988 struct task_struct *g, *t;
3989
3990 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3991 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3992 * sizeof(struct ftrace_ret_stack),
3993 GFP_KERNEL);
3994 if (!ret_stack_list[i]) {
3995 start = 0;
3996 end = i;
3997 ret = -ENOMEM;
3998 goto free;
3999 }
4000 }
4001
4002 read_lock_irqsave(&tasklist_lock, flags);
4003 do_each_thread(g, t) {
4004 if (start == end) {
4005 ret = -EAGAIN;
4006 goto unlock;
4007 }
4008
4009 if (t->ret_stack == NULL) {
Frederic Weisbecker380c4b12008-12-06 03:43:41 +01004010 atomic_set(&t->tracing_graph_pause, 0);
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004011 atomic_set(&t->trace_overrun, 0);
Steven Rostedt26c01622009-06-02 14:01:19 -04004012 t->curr_ret_stack = -1;
4013 /* Make sure the tasks see the -1 first: */
4014 smp_wmb();
4015 t->ret_stack = ret_stack_list[start++];
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004016 }
4017 } while_each_thread(g, t);
4018
4019unlock:
4020 read_unlock_irqrestore(&tasklist_lock, flags);
4021free:
4022 for (i = start; i < end; i++)
4023 kfree(ret_stack_list[i]);
4024 return ret;
4025}
4026
Steven Rostedt8aef2d22009-03-24 01:10:15 -04004027static void
Steven Rostedt38516ab2010-04-20 17:04:50 -04004028ftrace_graph_probe_sched_switch(void *ignore,
4029 struct task_struct *prev, struct task_struct *next)
Steven Rostedt8aef2d22009-03-24 01:10:15 -04004030{
4031 unsigned long long timestamp;
4032 int index;
4033
Steven Rostedtbe6f1642009-03-24 11:06:24 -04004034 /*
4035 * Does the user want to count the time a function was asleep?
4036 * If so, do not update the time stamps.
4037 */
4038 if (trace_flags & TRACE_ITER_SLEEP_TIME)
4039 return;
4040
Steven Rostedt8aef2d22009-03-24 01:10:15 -04004041 timestamp = trace_clock_local();
4042
4043 prev->ftrace_timestamp = timestamp;
4044
4045 /* only process tasks that we timestamped */
4046 if (!next->ftrace_timestamp)
4047 return;
4048
4049 /*
4050 * Update all the counters in next to make up for the
4051 * time next was sleeping.
4052 */
4053 timestamp -= next->ftrace_timestamp;
4054
4055 for (index = next->curr_ret_stack; index >= 0; index--)
4056 next->ret_stack[index].calltime += timestamp;
4057}
4058
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004059/* Allocate a return stack for each task */
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01004060static int start_graph_tracing(void)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004061{
4062 struct ftrace_ret_stack **ret_stack_list;
Frederic Weisbecker5b058bc2009-02-17 18:35:34 +01004063 int ret, cpu;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004064
4065 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4066 sizeof(struct ftrace_ret_stack *),
4067 GFP_KERNEL);
4068
4069 if (!ret_stack_list)
4070 return -ENOMEM;
4071
Frederic Weisbecker5b058bc2009-02-17 18:35:34 +01004072 /* The cpu_boot init_task->ret_stack will never be freed */
Steven Rostedt179c4982009-06-02 12:03:19 -04004073 for_each_online_cpu(cpu) {
4074 if (!idle_task(cpu)->ret_stack)
Steven Rostedt868baf02011-02-10 21:26:13 -05004075 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
Steven Rostedt179c4982009-06-02 12:03:19 -04004076 }
Frederic Weisbecker5b058bc2009-02-17 18:35:34 +01004077
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004078 do {
4079 ret = alloc_retstack_tasklist(ret_stack_list);
4080 } while (ret == -EAGAIN);
4081
Steven Rostedt8aef2d22009-03-24 01:10:15 -04004082 if (!ret) {
Steven Rostedt38516ab2010-04-20 17:04:50 -04004083 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
Steven Rostedt8aef2d22009-03-24 01:10:15 -04004084 if (ret)
4085 pr_info("ftrace_graph: Couldn't activate tracepoint"
4086 " probe to kernel_sched_switch\n");
4087 }
4088
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004089 kfree(ret_stack_list);
4090 return ret;
4091}
4092
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -08004093/*
4094 * Hibernation protection.
4095 * The state of the current task is too unstable during
4096 * suspend/restore to disk. We want to protect against that.
4097 */
4098static int
4099ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4100 void *unused)
4101{
4102 switch (state) {
4103 case PM_HIBERNATION_PREPARE:
4104 pause_graph_tracing();
4105 break;
4106
4107 case PM_POST_HIBERNATION:
4108 unpause_graph_tracing();
4109 break;
4110 }
4111 return NOTIFY_DONE;
4112}
4113
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01004114int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4115 trace_func_graph_ent_t entryfunc)
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01004116{
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01004117 int ret = 0;
4118
Steven Rostedte6ea44e2009-02-14 01:42:44 -05004119 mutex_lock(&ftrace_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01004120
Steven Rostedt05ce5812009-03-24 00:18:31 -04004121 /* we currently allow only one tracer registered at a time */
Steven Rostedt597af812009-04-03 15:24:12 -04004122 if (ftrace_graph_active) {
Steven Rostedt05ce5812009-03-24 00:18:31 -04004123 ret = -EBUSY;
4124 goto out;
4125 }
4126
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -08004127 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4128 register_pm_notifier(&ftrace_suspend_notifier);
4129
Steven Rostedt597af812009-04-03 15:24:12 -04004130 ftrace_graph_active++;
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01004131 ret = start_graph_tracing();
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004132 if (ret) {
Steven Rostedt597af812009-04-03 15:24:12 -04004133 ftrace_graph_active--;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004134 goto out;
4135 }
Steven Rostedte53a6312008-11-26 00:16:25 -05004136
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01004137 ftrace_graph_return = retfunc;
4138 ftrace_graph_entry = entryfunc;
Steven Rostedte53a6312008-11-26 00:16:25 -05004139
Steven Rostedta1cd6172011-05-23 15:24:25 -04004140 ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01004141
4142out:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05004143 mutex_unlock(&ftrace_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01004144 return ret;
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01004145}
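/*
 * Usage sketch (illustrative only; the callback names are hypothetical and
 * the return-handler signature is taken from the trace_func_graph_ret_t
 * typedef in ftrace.h, which is not visible in this file). Only one graph
 * tracer may be registered at a time, as enforced above:
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *	unregister_ftrace_graph();
 *
 * A nonzero return from the entry handler lets the call be traced;
 * returning zero skips it (the stub used when graph tracing is idle
 * returns 0).
 */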
4146
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01004147void unregister_ftrace_graph(void)
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01004148{
Steven Rostedte6ea44e2009-02-14 01:42:44 -05004149 mutex_lock(&ftrace_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01004150
Steven Rostedt597af812009-04-03 15:24:12 -04004151 if (unlikely(!ftrace_graph_active))
Steven Rostedt2aad1b72009-03-30 11:11:28 -04004152 goto out;
4153
Steven Rostedt597af812009-04-03 15:24:12 -04004154 ftrace_graph_active--;
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01004155 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
Steven Rostedte49dc192008-12-02 23:50:05 -05004156 ftrace_graph_entry = ftrace_graph_entry_stub;
Steven Rostedtbd69c302011-05-03 21:55:54 -04004157 ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -08004158 unregister_pm_notifier(&ftrace_suspend_notifier);
Steven Rostedt38516ab2010-04-20 17:04:50 -04004159 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01004160
Steven Rostedt2aad1b72009-03-30 11:11:28 -04004161 out:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05004162 mutex_unlock(&ftrace_lock);
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01004163}
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004164
Steven Rostedt868baf02011-02-10 21:26:13 -05004165static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4166
4167static void
4168graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4169{
4170 atomic_set(&t->tracing_graph_pause, 0);
4171 atomic_set(&t->trace_overrun, 0);
4172 t->ftrace_timestamp = 0;
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004173 /* make curr_ret_stack visible before we add the ret_stack */
Steven Rostedt868baf02011-02-10 21:26:13 -05004174 smp_wmb();
4175 t->ret_stack = ret_stack;
4176}
4177
4178/*
4179 * Allocate a return stack for the idle task. May be the first
4180 * time through, or it may be done by CPU hotplug online.
4181 */
4182void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4183{
4184 t->curr_ret_stack = -1;
4185 /*
4186 * The idle task has no parent; it either has its own
4187 * stack or no stack at all.
4188 */
4189 if (t->ret_stack)
4190 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4191
4192 if (ftrace_graph_active) {
4193 struct ftrace_ret_stack *ret_stack;
4194
4195 ret_stack = per_cpu(idle_ret_stack, cpu);
4196 if (!ret_stack) {
4197 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4198 * sizeof(struct ftrace_ret_stack),
4199 GFP_KERNEL);
4200 if (!ret_stack)
4201 return;
4202 per_cpu(idle_ret_stack, cpu) = ret_stack;
4203 }
4204 graph_init_task(t, ret_stack);
4205 }
4206}
4207
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004208/* Allocate a return stack for newly created task */
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01004209void ftrace_graph_init_task(struct task_struct *t)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004210{
Steven Rostedt84047e32009-06-02 16:51:55 -04004211 /* Make sure we do not use the parent ret_stack */
4212 t->ret_stack = NULL;
Steven Rostedtea14eb72010-03-12 19:41:23 -05004213 t->curr_ret_stack = -1;
Steven Rostedt84047e32009-06-02 16:51:55 -04004214
Steven Rostedt597af812009-04-03 15:24:12 -04004215 if (ftrace_graph_active) {
Steven Rostedt82310a32009-06-02 12:26:07 -04004216 struct ftrace_ret_stack *ret_stack;
4217
4218 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004219 * sizeof(struct ftrace_ret_stack),
4220 GFP_KERNEL);
Steven Rostedt82310a32009-06-02 12:26:07 -04004221 if (!ret_stack)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004222 return;
Steven Rostedt868baf02011-02-10 21:26:13 -05004223 graph_init_task(t, ret_stack);
Steven Rostedt84047e32009-06-02 16:51:55 -04004224 }
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004225}
4226
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01004227void ftrace_graph_exit_task(struct task_struct *t)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004228{
Frederic Weisbeckereae849c2008-11-23 17:33:12 +01004229 struct ftrace_ret_stack *ret_stack = t->ret_stack;
4230
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004231 t->ret_stack = NULL;
Frederic Weisbeckereae849c2008-11-23 17:33:12 +01004232 /* NULL must become visible to IRQs before we free it: */
4233 barrier();
4234
4235 kfree(ret_stack);
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004236}
Steven Rostedt14a866c2008-12-02 23:50:02 -05004237
4238void ftrace_graph_stop(void)
4239{
4240 ftrace_stop();
4241}
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01004242#endif