blob: dd4dff71d89adce177301657ae0274402f109404 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Steven Rostedt1b29b012008-05-12 21:20:42 +02002/*
3 * ring buffer based function tracer
4 *
5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 *
8 * Based on code from the latency_tracer, that is:
9 *
10 * Copyright (C) 2004-2006 Ingo Molnar
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +010011 * Copyright (C) 2004 Nadia Yvette Chambers
Steven Rostedt1b29b012008-05-12 21:20:42 +020012 */
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -050013#include <linux/ring_buffer.h>
Steven Rostedt1b29b012008-05-12 21:20:42 +020014#include <linux/debugfs.h>
15#include <linux/uaccess.h>
16#include <linux/ftrace.h>
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -050017#include <linux/slab.h>
Ingo Molnar2e0f5762008-05-12 21:20:49 +020018#include <linux/fs.h>
Steven Rostedt1b29b012008-05-12 21:20:42 +020019
20#include "trace.h"
21
/*
 * Forward declarations for the tracer start/stop helpers and the two
 * ftrace callbacks (plain and stack-tracing variants) defined below,
 * plus the tracer flags structure they consult.
 */
static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};
Steven Rostedt53614992009-01-15 19:12:40 -050036
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -050037static int allocate_ftrace_ops(struct trace_array *tr)
38{
39 struct ftrace_ops *ops;
40
41 ops = kzalloc(sizeof(*ops), GFP_KERNEL);
42 if (!ops)
43 return -ENOMEM;
44
Wei Yang48a42f52020-06-10 11:32:51 +080045 /* Currently only the non stack version is supported */
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -050046 ops->func = function_trace_call;
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -040047 ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -050048
49 tr->ops = ops;
50 ops->private = tr;
51 return 0;
52}
Steven Rostedta225cdd2009-01-15 23:06:03 -050053
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -050054
55int ftrace_create_function_files(struct trace_array *tr,
56 struct dentry *parent)
57{
58 int ret;
59
Steven Rostedt (Red Hat)5d6c97c2014-04-16 19:21:53 -040060 /*
61 * The top level array uses the "global_ops", and the files are
62 * created on boot up.
63 */
64 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
65 return 0;
66
67 ret = allocate_ftrace_ops(tr);
68 if (ret)
69 return ret;
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -050070
71 ftrace_create_filter_files(tr->ops, parent);
72
73 return 0;
74}
75
/*
 * Tear down what ftrace_create_function_files() set up: remove the
 * filter files first (they reference tr->ops), then free the ops.
 */
void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;	/* guard against stale use after free */
}
82
/*
 * Tracer ->init callback: select the callback (plain or with stack
 * traces), wire it into the array's ops, and start tracing.
 * Returns 0 on success, -ENOMEM if the instance's ops are missing.
 */
static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation. Unless it failed
	 * the allocation.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	/* Record the CPU the tracer was started on (read under get_cpu()) */
	tr->array_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}
111
/* Tracer ->reset callback: undo function_trace_init() in reverse order. */
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}
118
/* Tracer ->start callback: wipe stale events from all per-CPU buffers. */
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}
123
/*
 * The ftrace callback for the plain function tracer. Records one
 * function-entry event into the trace buffer of the trace_array that
 * owns @op (stashed in op->private).
 */
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/* Sample the preempt count before we disable preemption ourselves */
	pc = preempt_count();
	preempt_disable_notrace();

	/* Bail if we are already inside this callback (traced helpers) */
	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	/* Only record if this CPU's buffer is not disabled */
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}
156
/*
 * Number of internal frames to skip so the recorded stack trace starts
 * at the traced function itself; the count differs per unwinder (the
 * ORC list below omits __trace_stack()).
 */
#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif
174
/*
 * The ftrace callback used when the "func_stack_trace" option is set:
 * records the function-entry event plus a stack trace for each call.
 */
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	/* disabled == 1 means we are the only user on this CPU right now */
	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		__trace_stack(tr, flags, STACK_SKIP, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
207
/* Tracer options: "func_stack_trace" toggles per-call stack traces */
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
219
static void tracing_start_function_trace(struct trace_array *tr)
{
	/*
	 * Clear function_enabled before registering: the callbacks check
	 * it first, so they stay nops until registration is complete.
	 */
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	/* Disable the callback before tearing down the registration */
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}
232
Chunyu Hud39cdd22016-03-08 21:37:01 +0800233static struct tracer function_trace;
234
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -0500235static int
236func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
Steven Rostedt53614992009-01-15 19:12:40 -0500237{
Anton Vorontsovf555f122012-07-09 17:10:46 -0700238 switch (bit) {
239 case TRACE_FUNC_OPT_STACK:
Steven Rostedt53614992009-01-15 19:12:40 -0500240 /* do nothing if already set */
241 if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
Anton Vorontsovf555f122012-07-09 17:10:46 -0700242 break;
Steven Rostedt53614992009-01-15 19:12:40 -0500243
Chunyu Hud39cdd22016-03-08 21:37:01 +0800244 /* We can change this flag when not running. */
245 if (tr->current_trace != &function_trace)
246 break;
247
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500248 unregister_ftrace_function(tr->ops);
249
Steven Rostedt3eb36aa2009-01-15 22:21:43 -0500250 if (set) {
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -0500251 tr->ops->func = function_stack_trace_call;
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500252 register_ftrace_function(tr->ops);
Steven Rostedt3eb36aa2009-01-15 22:21:43 -0500253 } else {
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -0500254 tr->ops->func = function_trace_call;
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500255 register_ftrace_function(tr->ops);
Steven Rostedt3eb36aa2009-01-15 22:21:43 -0500256 }
Steven Rostedt53614992009-01-15 19:12:40 -0500257
Anton Vorontsovf555f122012-07-09 17:10:46 -0700258 break;
Anton Vorontsovf555f122012-07-09 17:10:46 -0700259 default:
260 return -EINVAL;
Steven Rostedt53614992009-01-15 19:12:40 -0500261 }
262
Anton Vorontsovf555f122012-07-09 17:10:46 -0700263 return 0;
Steven Rostedt53614992009-01-15 19:12:40 -0500264}
265
/* The "function" tracer definition registered with the tracing core. */
static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,	/* usable from instance trace_arrays */
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
279
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500280#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * update_traceon_count - turn tracing on/off, limited by a per-ip count
 * @ops:  the probe ops this handler belongs to
 * @ip:   instruction pointer of the function that fired the probe
 * @tr:   the trace array to enable or disable
 * @on:   true to turn tracing on, false to turn it off
 * @data: the ftrace_func_mapper holding per-ip counters
 *
 * NOTE(review): the pointer returned by ftrace_func_mapper_find_ip()
 * is dereferenced without a NULL check — presumably the entry is
 * guaranteed by ftrace_count_init() at registration; confirm against
 * the probe setup path.
 */
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * one minus the old counter. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	/* Already in the requested state: nothing to do, keep the count */
	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}
341
342static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -0400343ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -0400344 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400345 void *data)
Steven Rostedt (Red Hat)1c317142013-03-09 08:36:53 -0500346{
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -0400347 update_traceon_count(ops, ip, tr, 1, data);
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500348}
349
350static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -0400351ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -0400352 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400353 void *data)
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500354{
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -0400355 update_traceon_count(ops, ip, tr, 0, data);
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500356}
357
/* Probe handler for "traceon" with no count: always enable tracing */
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	/* Turn the buffer on only if it is currently off */
	if (!tracer_tracing_is_on(tr))
		tracer_tracing_on(tr);
}
368
369static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -0400370ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -0400371 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400372 void *data)
Steven Rostedt (Red Hat)8380d242013-03-09 08:56:43 -0500373{
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -0400374 if (!tracer_tracing_is_on(tr))
Steven Rostedt (Red Hat)8380d242013-03-09 08:56:43 -0500375 return;
376
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -0400377 tracer_tracing_off(tr);
Steven Rostedt (Red Hat)8380d242013-03-09 08:56:43 -0500378}
379
/*
 * Frames to skip for the stacktrace probe; deeper than STACK_SKIP
 * because the probe path goes through ftrace_ops_assist_func(). The
 * count differs per unwinder (the ORC list omits the first two frames
 * shown in the non-ORC list).
 */
#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif
Steven Rostedte110e3d2009-02-16 23:38:13 -0500401
/* Record a stack trace into @tr's buffer, skipping our own frames */
static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned long flags;
	int pc;

	local_save_flags(flags);
	pc = preempt_count();

	__trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
}
412
/* Probe handler for "stacktrace" with no count (unlimited) */
static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}
Steven Rostedt (Red Hat)8380d242013-03-09 08:56:43 -0500420
/*
 * Probe handler for "stacktrace:N" — record a stack trace at most N
 * times. The cmpxchg loop guarantees the counter is decremented
 * exactly once per recorded trace even with concurrent callers.
 */
static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		/* cmpxchg returns the old value; equality means we won */
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		/* Tracing may have been turned off while we traced */
		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}
462
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400463static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
464 void *data)
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500465{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400466 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400467 long *count = NULL;
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500468
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400469 if (mapper)
470 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500471
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400472 if (count) {
473 if (*count <= 0)
474 return 0;
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500475 (*count)--;
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400476 }
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500477
478 return 1;
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400479}
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500480
/* Probe handler for "dump": dump all CPU buffers, at most "count" times */
static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}
499
/*
 * Common seq_file output for probe listings:
 * "<function>:<name>:count=N" or "<function>:<name>:unlimited".
 */
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}
520
521static int
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400522ftrace_traceon_print(struct seq_file *m, unsigned long ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -0400523 struct ftrace_probe_ops *ops,
524 void *data)
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500525{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400526 return ftrace_probe_print("traceon", m, ip, ops, data);
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400527}
528
529static int
530ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
531 struct ftrace_probe_ops *ops, void *data)
532{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400533 return ftrace_probe_print("traceoff", m, ip, ops, data);
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400534}
535
536static int
537ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
538 struct ftrace_probe_ops *ops, void *data)
539{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400540 return ftrace_probe_print("stacktrace", m, ip, ops, data);
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400541}
542
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400543static int
544ftrace_dump_print(struct seq_file *m, unsigned long ip,
545 struct ftrace_probe_ops *ops, void *data)
546{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400547 return ftrace_probe_print("dump", m, ip, ops, data);
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400548}
549
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400550static int
551ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
552 struct ftrace_probe_ops *ops, void *data)
553{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400554 return ftrace_probe_print("cpudump", m, ip, ops, data);
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400555}
556
557
558static int
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -0400559ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400560 unsigned long ip, void *init_data, void **data)
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400561{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400562 struct ftrace_func_mapper *mapper = *data;
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400563
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400564 if (!mapper) {
565 mapper = allocate_ftrace_func_mapper();
566 if (!mapper)
567 return -ENOMEM;
568 *data = mapper;
569 }
570
571 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400572}
573
/*
 * ->free handler for count-carrying probe ops. Called per-ip to drop
 * one counter, or with ip == 0 when the whole probe goes away.
 */
static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	/* ip == 0: the probe itself is being removed — free everything */
	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}
587
/* Count-limited variants: per-ip counters managed by ftrace_count_init/free */
static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func = ftrace_traceon_count,
	.print = ftrace_traceon_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func = ftrace_traceoff_count,
	.print = ftrace_traceoff_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func = ftrace_stacktrace_count,
	.print = ftrace_stacktrace_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func = ftrace_dump_probe,
	.print = ftrace_dump_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

/*
 * NOTE(review): unlike dump_probe_ops, no .init/.free here, so a
 * ":count" passed to cpudump is never stored and update_count()
 * treats it as unlimited — confirm this asymmetry is intentional.
 */
static struct ftrace_probe_ops cpudump_probe_ops = {
	.func = ftrace_cpudump_probe,
	.print = ftrace_cpudump_print,
};

/* Unlimited variants (no per-ip counter state) */
static struct ftrace_probe_ops traceon_probe_ops = {
	.func = ftrace_traceon,
	.print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func = ftrace_traceoff,
	.print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func = ftrace_stacktrace,
	.print = ftrace_stacktrace_print,
};
635
/*
 * Common parser for the probe commands written to set_ftrace_filter
 * (traceon/traceoff/stacktrace/dump/cpudump).
 *
 * @glob may be prefixed with '!' to unregister an existing probe;
 * @param, if present, is the optional ":count" limiting how many
 * times the probe fires. Returns 0 on success or a negative errno.
 */
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;	/* default when no count is given */
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	/* '!' prefix: remove the probe instead of adding it */
	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}
674
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400675static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400676ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400677 char *glob, char *cmd, char *param, int enable)
678{
679 struct ftrace_probe_ops *ops;
680
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -0400681 if (!tr)
682 return -ENODEV;
683
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400684 /* we register both traceon and traceoff to this callback */
685 if (strcmp(cmd, "traceon") == 0)
686 ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
687 else
688 ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
689
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400690 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400691 param, enable);
692}
693
694static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400695ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400696 char *glob, char *cmd, char *param, int enable)
697{
698 struct ftrace_probe_ops *ops;
699
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -0400700 if (!tr)
701 return -ENODEV;
702
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400703 ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
704
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400705 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400706 param, enable);
707}
708
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400709static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400710ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400711 char *glob, char *cmd, char *param, int enable)
712{
713 struct ftrace_probe_ops *ops;
714
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -0400715 if (!tr)
716 return -ENODEV;
717
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400718 ops = &dump_probe_ops;
719
720 /* Only dump once. */
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400721 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400722 "1", enable);
723}
724
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400725static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400726ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400727 char *glob, char *cmd, char *param, int enable)
728{
729 struct ftrace_probe_ops *ops;
730
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -0400731 if (!tr)
732 return -ENODEV;
733
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400734 ops = &cpudump_probe_ops;
735
736 /* Only dump once. */
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400737 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400738 "1", enable);
739}
740
/* set_ftrace_filter command: "<func>:traceon[:count]" */
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};
745
/* set_ftrace_filter command: "<func>:traceoff[:count]" */
static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};
750
/* set_ftrace_filter command: "<func>:stacktrace[:count]" */
static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};
755
/* set_ftrace_filter command: "<func>:dump" (one-shot) */
static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};
760
/* set_ftrace_filter command: "<func>:cpudump" (one-shot) */
static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};
765
/*
 * Register all function-trigger commands with set_ftrace_filter.
 *
 * On any registration failure, the goto chain unwinds the commands
 * registered so far, in reverse order, and the failing call's error
 * code is returned.  Returns 0 when every command is registered.
 */
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

	/* Unwind in reverse registration order. */
 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
/*
 * Without CONFIG_DYNAMIC_FTRACE the set_ftrace_filter trigger commands
 * don't exist, so there is nothing to register.
 */
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
809
/*
 * Entry point for the function tracer: register the filter trigger
 * commands, then register the tracer itself.  Returns the result of
 * register_tracer() (trigger-command failure is not propagated).
 */
__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}