blob: a0910c0cdf2eabfa690d7cf434a5467faa5e01e4 [file] [log] [blame]
Steven Rostedt1b29b012008-05-12 21:20:42 +02001/*
2 * ring buffer based function tracer
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Based on code from the latency_tracer, that is:
8 *
9 * Copyright (C) 2004-2006 Ingo Molnar
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +010010 * Copyright (C) 2004 Nadia Yvette Chambers
Steven Rostedt1b29b012008-05-12 21:20:42 +020011 */
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -050012#include <linux/ring_buffer.h>
Steven Rostedt1b29b012008-05-12 21:20:42 +020013#include <linux/debugfs.h>
14#include <linux/uaccess.h>
15#include <linux/ftrace.h>
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -050016#include <linux/slab.h>
Ingo Molnar2e0f5762008-05-12 21:20:49 +020017#include <linux/fs.h>
Steven Rostedt1b29b012008-05-12 21:20:42 +020018
19#include "trace.h"
20
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -050021static void tracing_start_function_trace(struct trace_array *tr);
22static void tracing_stop_function_trace(struct trace_array *tr);
23static void
24function_trace_call(unsigned long ip, unsigned long parent_ip,
25 struct ftrace_ops *op, struct pt_regs *pt_regs);
26static void
27function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
28 struct ftrace_ops *op, struct pt_regs *pt_regs);
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -050029static struct tracer_flags func_flags;
Steven Rostedta225cdd2009-01-15 23:06:03 -050030
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -050031/* Our option */
32enum {
33 TRACE_FUNC_OPT_STACK = 0x1,
34};
Steven Rostedt53614992009-01-15 19:12:40 -050035
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -050036static int allocate_ftrace_ops(struct trace_array *tr)
37{
38 struct ftrace_ops *ops;
39
40 ops = kzalloc(sizeof(*ops), GFP_KERNEL);
41 if (!ops)
42 return -ENOMEM;
43
44 /* Currently only the non stack verision is supported */
45 ops->func = function_trace_call;
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -040046 ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -050047
48 tr->ops = ops;
49 ops->private = tr;
50 return 0;
51}
Steven Rostedta225cdd2009-01-15 23:06:03 -050052
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -050053
54int ftrace_create_function_files(struct trace_array *tr,
55 struct dentry *parent)
56{
57 int ret;
58
Steven Rostedt (Red Hat)5d6c97c2014-04-16 19:21:53 -040059 /*
60 * The top level array uses the "global_ops", and the files are
61 * created on boot up.
62 */
63 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
64 return 0;
65
66 ret = allocate_ftrace_ops(tr);
67 if (ret)
68 return ret;
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -050069
70 ftrace_create_filter_files(tr->ops, parent);
71
72 return 0;
73}
74
75void ftrace_destroy_function_files(struct trace_array *tr)
76{
77 ftrace_destroy_filter_files(tr->ops);
78 kfree(tr->ops);
79 tr->ops = NULL;
80}
81
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -020082static int function_trace_init(struct trace_array *tr)
Steven Rostedt1b29b012008-05-12 21:20:42 +020083{
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -050084 ftrace_func_t func;
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -050085
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -050086 /*
87 * Instance trace_arrays get their ops allocated
88 * at instance creation. Unless it failed
89 * the allocation.
90 */
91 if (!tr->ops)
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -050092 return -ENOMEM;
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -050093
94 /* Currently only the global instance can do stack tracing */
95 if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
96 func_flags.val & TRACE_FUNC_OPT_STACK)
97 func = function_stack_trace_call;
98 else
99 func = function_trace_call;
100
101 ftrace_init_array_ops(tr, func);
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500102
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -0500103 tr->trace_buffer.cpu = get_cpu();
Steven Rostedt26bc83f2008-07-10 20:58:14 -0400104 put_cpu();
105
Steven Rostedt41bc8142008-05-22 11:49:22 -0400106 tracing_start_cmdline_record();
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500107 tracing_start_function_trace(tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +0100108 return 0;
Steven Rostedt1b29b012008-05-12 21:20:42 +0200109}
110
/* Tracer ->reset callback: undo everything function_trace_init() did. */
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}
117
Steven Rostedt90369902008-11-05 16:05:44 -0500118static void function_trace_start(struct trace_array *tr)
119{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -0500120 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt90369902008-11-05 16:05:44 -0500121}
122
Steven Rostedtbb3c3c92009-01-15 20:40:23 -0500123static void
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -0400124function_trace_call(unsigned long ip, unsigned long parent_ip,
Steven Rostedta1e2e312011-08-09 12:50:46 -0400125 struct ftrace_ops *op, struct pt_regs *pt_regs)
Steven Rostedtbb3c3c92009-01-15 20:40:23 -0500126{
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500127 struct trace_array *tr = op->private;
Steven Rostedtbb3c3c92009-01-15 20:40:23 -0500128 struct trace_array_cpu *data;
129 unsigned long flags;
Steven Rostedtd41032a2013-01-24 07:52:34 -0500130 int bit;
Steven Rostedtbb3c3c92009-01-15 20:40:23 -0500131 int cpu;
132 int pc;
133
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500134 if (unlikely(!tr->function_enabled))
Steven Rostedtbb3c3c92009-01-15 20:40:23 -0500135 return;
136
Steven Rostedt897f68a2012-11-02 17:52:35 -0400137 pc = preempt_count();
138 preempt_disable_notrace();
Steven Rostedtbb3c3c92009-01-15 20:40:23 -0500139
Steven Rostedt897f68a2012-11-02 17:52:35 -0400140 bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
141 if (bit < 0)
142 goto out;
143
144 cpu = smp_processor_id();
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -0500145 data = per_cpu_ptr(tr->trace_buffer.data, cpu);
Steven Rostedt897f68a2012-11-02 17:52:35 -0400146 if (!atomic_read(&data->disabled)) {
147 local_save_flags(flags);
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -0500148 trace_function(tr, ip, parent_ip, flags, pc);
Steven Rostedtbb3c3c92009-01-15 20:40:23 -0500149 }
Steven Rostedt897f68a2012-11-02 17:52:35 -0400150 trace_clear_recursion(bit);
Steven Rostedtbb3c3c92009-01-15 20:40:23 -0500151
Steven Rostedt897f68a2012-11-02 17:52:35 -0400152 out:
153 preempt_enable_notrace();
Steven Rostedtbb3c3c92009-01-15 20:40:23 -0500154}
155
156static void
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -0400157function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
Steven Rostedta1e2e312011-08-09 12:50:46 -0400158 struct ftrace_ops *op, struct pt_regs *pt_regs)
Steven Rostedt53614992009-01-15 19:12:40 -0500159{
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500160 struct trace_array *tr = op->private;
Steven Rostedt53614992009-01-15 19:12:40 -0500161 struct trace_array_cpu *data;
162 unsigned long flags;
163 long disabled;
164 int cpu;
165 int pc;
166
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500167 if (unlikely(!tr->function_enabled))
Steven Rostedt53614992009-01-15 19:12:40 -0500168 return;
169
170 /*
171 * Need to use raw, since this must be called before the
172 * recursive protection is performed.
173 */
174 local_irq_save(flags);
175 cpu = raw_smp_processor_id();
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -0500176 data = per_cpu_ptr(tr->trace_buffer.data, cpu);
Steven Rostedt53614992009-01-15 19:12:40 -0500177 disabled = atomic_inc_return(&data->disabled);
178
179 if (likely(disabled == 1)) {
180 pc = preempt_count();
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -0500181 trace_function(tr, ip, parent_ip, flags, pc);
Steven Rostedt53614992009-01-15 19:12:40 -0500182 /*
183 * skip over 5 funcs:
184 * __ftrace_trace_stack,
185 * __trace_stack,
186 * function_stack_trace_call
187 * ftrace_list_func
188 * ftrace_call
189 */
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -0500190 __trace_stack(tr, flags, 5, pc);
Steven Rostedt53614992009-01-15 19:12:40 -0500191 }
192
193 atomic_dec(&data->disabled);
194 local_irq_restore(flags);
195}
196
Steven Rostedt53614992009-01-15 19:12:40 -0500197static struct tracer_opt func_opts[] = {
198#ifdef CONFIG_STACKTRACE
199 { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
200#endif
201 { } /* Always set a last empty entry */
202};
203
204static struct tracer_flags func_flags = {
205 .val = 0, /* By default: all flags disabled */
206 .opts = func_opts
207};
208
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500209static void tracing_start_function_trace(struct trace_array *tr)
Steven Rostedt3eb36aa2009-01-15 22:21:43 -0500210{
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500211 tr->function_enabled = 0;
212 register_ftrace_function(tr->ops);
213 tr->function_enabled = 1;
Steven Rostedt3eb36aa2009-01-15 22:21:43 -0500214}
215
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500216static void tracing_stop_function_trace(struct trace_array *tr)
Steven Rostedt3eb36aa2009-01-15 22:21:43 -0500217{
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500218 tr->function_enabled = 0;
219 unregister_ftrace_function(tr->ops);
Steven Rostedt3eb36aa2009-01-15 22:21:43 -0500220}
221
Chunyu Hud39cdd22016-03-08 21:37:01 +0800222static struct tracer function_trace;
223
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -0500224static int
225func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
Steven Rostedt53614992009-01-15 19:12:40 -0500226{
Anton Vorontsovf555f122012-07-09 17:10:46 -0700227 switch (bit) {
228 case TRACE_FUNC_OPT_STACK:
Steven Rostedt53614992009-01-15 19:12:40 -0500229 /* do nothing if already set */
230 if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
Anton Vorontsovf555f122012-07-09 17:10:46 -0700231 break;
Steven Rostedt53614992009-01-15 19:12:40 -0500232
Chunyu Hud39cdd22016-03-08 21:37:01 +0800233 /* We can change this flag when not running. */
234 if (tr->current_trace != &function_trace)
235 break;
236
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500237 unregister_ftrace_function(tr->ops);
238
Steven Rostedt3eb36aa2009-01-15 22:21:43 -0500239 if (set) {
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -0500240 tr->ops->func = function_stack_trace_call;
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500241 register_ftrace_function(tr->ops);
Steven Rostedt3eb36aa2009-01-15 22:21:43 -0500242 } else {
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -0500243 tr->ops->func = function_trace_call;
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500244 register_ftrace_function(tr->ops);
Steven Rostedt3eb36aa2009-01-15 22:21:43 -0500245 }
Steven Rostedt53614992009-01-15 19:12:40 -0500246
Anton Vorontsovf555f122012-07-09 17:10:46 -0700247 break;
Anton Vorontsovf555f122012-07-09 17:10:46 -0700248 default:
249 return -EINVAL;
Steven Rostedt53614992009-01-15 19:12:40 -0500250 }
251
Anton Vorontsovf555f122012-07-09 17:10:46 -0700252 return 0;
Steven Rostedt53614992009-01-15 19:12:40 -0500253}
254
Steven Rostedt (Red Hat)8f768992013-07-18 14:41:51 -0400255static struct tracer function_trace __tracer_data =
Steven Rostedt1b29b012008-05-12 21:20:42 +0200256{
Steven Rostedt3eb36aa2009-01-15 22:21:43 -0500257 .name = "function",
258 .init = function_trace_init,
259 .reset = function_trace_reset,
260 .start = function_trace_start,
Steven Rostedt53614992009-01-15 19:12:40 -0500261 .flags = &func_flags,
262 .set_flag = func_set_flag,
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500263 .allow_instances = true,
Steven Rostedt60a11772008-05-12 21:20:44 +0200264#ifdef CONFIG_FTRACE_SELFTEST
Steven Rostedt3eb36aa2009-01-15 22:21:43 -0500265 .selftest = trace_selftest_startup_function,
Steven Rostedt60a11772008-05-12 21:20:44 +0200266#endif
Steven Rostedt1b29b012008-05-12 21:20:42 +0200267};
268
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500269#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400270static void update_traceon_count(struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -0400271 unsigned long ip,
272 struct trace_array *tr, bool on,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400273 void *data)
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500274{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400275 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400276 long *count;
277 long old_count;
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500278
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500279 /*
280 * Tracing gets disabled (or enabled) once per count.
Steven Rostedt (Red Hat)0af26492014-11-20 10:05:36 -0500281 * This function can be called at the same time on multiple CPUs.
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500282 * It is fine if both disable (or enable) tracing, as disabling
283 * (or enabling) the second time doesn't do anything as the
284 * state of the tracer is already disabled (or enabled).
285 * What needs to be synchronized in this case is that the count
286 * only gets decremented once, even if the tracer is disabled
287 * (or enabled) twice, as the second one is really a nop.
288 *
289 * The memory barriers guarantee that we only decrement the
290 * counter once. First the count is read to a local variable
291 * and a read barrier is used to make sure that it is loaded
292 * before checking if the tracer is in the state we want.
293 * If the tracer is not in the state we want, then the count
294 * is guaranteed to be the old count.
295 *
296 * Next the tracer is set to the state we want (disabled or enabled)
297 * then a write memory barrier is used to make sure that
298 * the new state is visible before changing the counter by
299 * one minus the old counter. This guarantees that another CPU
300 * executing this code will see the new state before seeing
Steven Rostedt (Red Hat)0af26492014-11-20 10:05:36 -0500301 * the new counter value, and would not do anything if the new
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500302 * counter is seen.
303 *
304 * Note, there is no synchronization between this and a user
305 * setting the tracing_on file. But we currently don't care
306 * about that.
307 */
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400308 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
309 old_count = *count;
310
311 if (old_count <= 0)
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500312 return;
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500313
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500314 /* Make sure we see count before checking tracing state */
315 smp_rmb();
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500316
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -0400317 if (on == !!tracer_tracing_is_on(tr))
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500318 return;
319
320 if (on)
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -0400321 tracer_tracing_on(tr);
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500322 else
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -0400323 tracer_tracing_off(tr);
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500324
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500325 /* Make sure tracing state is visible before updating count */
326 smp_wmb();
327
328 *count = old_count - 1;
Steven Rostedt (Red Hat)1c317142013-03-09 08:36:53 -0500329}
330
/* traceon:N probe — turn tracing on, at most N times. */
static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}
338
/* traceoff:N probe — turn tracing off, at most N times. */
static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}
346
/* Unlimited traceon probe — enable tracing if it is currently off. */
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}
357
/* Unlimited traceoff probe — disable tracing if it is currently on. */
static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}
368
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400369/*
370 * Skip 4:
371 * ftrace_stacktrace()
372 * function_trace_probe_call()
373 * ftrace_ops_list_func()
374 * ftrace_call()
375 */
376#define STACK_SKIP 4
Steven Rostedte110e3d2009-02-16 23:38:13 -0500377
Steven Rostedt (VMware)dcc19d22017-04-20 11:59:18 -0400378static __always_inline void trace_stack(struct trace_array *tr)
379{
380 unsigned long flags;
381 int pc;
382
383 local_save_flags(flags);
384 pc = preempt_count();
385
386 __trace_stack(tr, flags, STACK_SKIP, pc);
387}
388
/* Unlimited stacktrace probe. */
static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}
Steven Rostedt (Red Hat)8380d242013-03-09 08:56:43 -0500396
/*
 * stacktrace:N probe — record a stack trace at most N times, using
 * cmpxchg so concurrent CPUs decrement the shared counter exactly once
 * per recorded trace.
 */
static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}
438
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400439static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
440 void *data)
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500441{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400442 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400443 long *count = NULL;
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500444
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400445 if (mapper)
446 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500447
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400448 if (count) {
449 if (*count <= 0)
450 return 0;
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500451 (*count)--;
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400452 }
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500453
454 return 1;
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400455}
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500456
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400457static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -0400458ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -0400459 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400460 void *data)
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400461{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400462 if (update_count(ops, ip, data))
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400463 ftrace_dump(DUMP_ALL);
464}
465
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400466/* Only dump the current CPU buffer. */
467static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -0400468ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -0400469 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400470 void *data)
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400471{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400472 if (update_count(ops, ip, data))
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400473 ftrace_dump(DUMP_ORIG);
474}
475
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500476static int
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400477ftrace_probe_print(const char *name, struct seq_file *m,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400478 unsigned long ip, struct ftrace_probe_ops *ops,
479 void *data)
Steven Rostedte110e3d2009-02-16 23:38:13 -0500480{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400481 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400482 long *count = NULL;
Steven Rostedte110e3d2009-02-16 23:38:13 -0500483
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400484 seq_printf(m, "%ps:%s", (void *)ip, name);
Steven Rostedte110e3d2009-02-16 23:38:13 -0500485
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400486 if (mapper)
487 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
488
489 if (count)
490 seq_printf(m, ":count=%ld\n", *count);
Steven Rostedt35ebf1c2009-02-17 13:12:12 -0500491 else
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400492 seq_puts(m, ":unlimited\n");
Steven Rostedte110e3d2009-02-16 23:38:13 -0500493
494 return 0;
495}
496
static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}
504
static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}
511
static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}
518
static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}
525
static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}
532
533
534static int
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -0400535ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400536 unsigned long ip, void *init_data, void **data)
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400537{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400538 struct ftrace_func_mapper *mapper = *data;
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400539
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400540 if (!mapper) {
541 mapper = allocate_ftrace_func_mapper();
542 if (!mapper)
543 return -ENOMEM;
544 *data = mapper;
545 }
546
547 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400548}
549
550static void
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -0400551ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400552 unsigned long ip, void *data)
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400553{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400554 struct ftrace_func_mapper *mapper = data;
555
556 if (!ip) {
557 free_ftrace_func_mapper(mapper, NULL);
558 return;
559 }
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400560
561 ftrace_func_mapper_remove_ip(mapper, ip);
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400562}
563
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400564static struct ftrace_probe_ops traceon_count_probe_ops = {
565 .func = ftrace_traceon_count,
566 .print = ftrace_traceon_print,
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400567 .init = ftrace_count_init,
568 .free = ftrace_count_free,
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400569};
570
571static struct ftrace_probe_ops traceoff_count_probe_ops = {
572 .func = ftrace_traceoff_count,
573 .print = ftrace_traceoff_print,
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400574 .init = ftrace_count_init,
575 .free = ftrace_count_free,
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400576};
577
578static struct ftrace_probe_ops stacktrace_count_probe_ops = {
579 .func = ftrace_stacktrace_count,
580 .print = ftrace_stacktrace_print,
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400581 .init = ftrace_count_init,
582 .free = ftrace_count_free,
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400583};
584
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400585static struct ftrace_probe_ops dump_probe_ops = {
586 .func = ftrace_dump_probe,
587 .print = ftrace_dump_print,
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400588 .init = ftrace_count_init,
589 .free = ftrace_count_free,
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400590};
591
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400592static struct ftrace_probe_ops cpudump_probe_ops = {
593 .func = ftrace_cpudump_probe,
594 .print = ftrace_cpudump_print,
595};
596
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400597static struct ftrace_probe_ops traceon_probe_ops = {
598 .func = ftrace_traceon,
599 .print = ftrace_traceon_print,
600};
601
602static struct ftrace_probe_ops traceoff_probe_ops = {
603 .func = ftrace_traceoff,
604 .print = ftrace_traceoff_print,
605};
606
607static struct ftrace_probe_ops stacktrace_probe_ops = {
608 .func = ftrace_stacktrace,
609 .print = ftrace_stacktrace_print,
610};
611
612static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400613ftrace_trace_probe_callback(struct trace_array *tr,
614 struct ftrace_probe_ops *ops,
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400615 struct ftrace_hash *hash, char *glob,
616 char *cmd, char *param, int enable)
617{
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500618 void *count = (void *)-1;
619 char *number;
620 int ret;
621
622 /* hash funcs only work with set_ftrace_filter */
623 if (!enable)
624 return -EINVAL;
625
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -0400626 if (glob[0] == '!')
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -0400627 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
Steven Rostedt (Red Hat)8b8fa622013-03-12 09:25:00 -0400628
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500629 if (!param)
630 goto out_reg;
631
632 number = strsep(&param, ":");
633
634 if (!strlen(number))
635 goto out_reg;
636
637 /*
638 * We use the callback data field (which is a pointer)
639 * as our counter.
640 */
Daniel Walterbcd83ea2012-09-26 22:08:38 +0200641 ret = kstrtoul(number, 0, (unsigned long *)&count);
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500642 if (ret)
643 return ret;
644
645 out_reg:
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400646 ret = register_ftrace_function_probe(glob, tr, ops, count);
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500647
Xiao Guangrong04aef322009-07-15 12:29:06 +0800648 return ret < 0 ? ret : 0;
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500649}
650
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400651static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400652ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400653 char *glob, char *cmd, char *param, int enable)
654{
655 struct ftrace_probe_ops *ops;
656
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -0400657 if (!tr)
658 return -ENODEV;
659
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400660 /* we register both traceon and traceoff to this callback */
661 if (strcmp(cmd, "traceon") == 0)
662 ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
663 else
664 ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
665
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400666 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400667 param, enable);
668}
669
670static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400671ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400672 char *glob, char *cmd, char *param, int enable)
673{
674 struct ftrace_probe_ops *ops;
675
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -0400676 if (!tr)
677 return -ENODEV;
678
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400679 ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
680
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400681 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400682 param, enable);
683}
684
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400685static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400686ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400687 char *glob, char *cmd, char *param, int enable)
688{
689 struct ftrace_probe_ops *ops;
690
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -0400691 if (!tr)
692 return -ENODEV;
693
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400694 ops = &dump_probe_ops;
695
696 /* Only dump once. */
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400697 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400698 "1", enable);
699}
700
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400701static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400702ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400703 char *glob, char *cmd, char *param, int enable)
704{
705 struct ftrace_probe_ops *ops;
706
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -0400707 if (!tr)
708 return -ENODEV;
709
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400710 ops = &cpudump_probe_ops;
711
712 /* Only dump once. */
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400713 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400714 "1", enable);
715}
716
/* set_ftrace_filter command: <func>:traceon[:count] */
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};
721
/* set_ftrace_filter command: <func>:traceoff[:count] */
static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};
726
/* set_ftrace_filter command: <func>:stacktrace[:count] */
static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};
731
/* set_ftrace_filter command: <func>:dump (fires once) */
static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};
736
/* set_ftrace_filter command: <func>:cpudump (fires once) */
static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};
741
/*
 * Register the "traceoff", "traceon", "stacktrace", "dump" and
 * "cpudump" commands with set_ftrace_filter.  On any failure, the
 * commands registered so far are unregistered in reverse order so no
 * partial set is left installed.
 *
 * Returns 0 on success or the error from the failing registration.
 */
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

	/* Error unwind: undo registrations in reverse order */
 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
/* Without dynamic ftrace there are no probe commands to register */
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
785
/*
 * Entry point for this tracer: install the set_ftrace_filter probe
 * commands (a no-op without CONFIG_DYNAMIC_FTRACE) and register the
 * function tracer itself.
 */
__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}