blob: 2c2126e1871d4d19e71a02408314b0badc42d013 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -050013#include <linux/ring_buffer.h>
Steven Rostedt1b29b012008-05-12 21:20:42 +020014#include <linux/debugfs.h>
15#include <linux/uaccess.h>
16#include <linux/ftrace.h>
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -050017#include <linux/slab.h>
Ingo Molnar2e0f5762008-05-12 21:20:49 +020018#include <linux/fs.h>
Steven Rostedt1b29b012008-05-12 21:20:42 +020019
20#include "trace.h"
21
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -050022static void tracing_start_function_trace(struct trace_array *tr);
23static void tracing_stop_function_trace(struct trace_array *tr);
24static void
25function_trace_call(unsigned long ip, unsigned long parent_ip,
26 struct ftrace_ops *op, struct pt_regs *pt_regs);
27static void
28function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
29 struct ftrace_ops *op, struct pt_regs *pt_regs);
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -050030static struct tracer_flags func_flags;
Steven Rostedta225cdd2009-01-15 23:06:03 -050031
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -050032/* Our option */
33enum {
34 TRACE_FUNC_OPT_STACK = 0x1,
35};
Steven Rostedt53614992009-01-15 19:12:40 -050036
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +090037int ftrace_allocate_ftrace_ops(struct trace_array *tr)
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -050038{
39 struct ftrace_ops *ops;
40
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +090041 /* The top level array uses the "global_ops" */
42 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
43 return 0;
44
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -050045 ops = kzalloc(sizeof(*ops), GFP_KERNEL);
46 if (!ops)
47 return -ENOMEM;
48
Wei Yang48a42f52020-06-10 11:32:51 +080049 /* Currently only the non stack version is supported */
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -050050 ops->func = function_trace_call;
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -040051 ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -050052
53 tr->ops = ops;
54 ops->private = tr;
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +090055
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -050056 return 0;
57}
Steven Rostedta225cdd2009-01-15 23:06:03 -050058
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +090059void ftrace_free_ftrace_ops(struct trace_array *tr)
60{
61 kfree(tr->ops);
62 tr->ops = NULL;
63}
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -050064
65int ftrace_create_function_files(struct trace_array *tr,
66 struct dentry *parent)
67{
Steven Rostedt (Red Hat)5d6c97c2014-04-16 19:21:53 -040068 /*
69 * The top level array uses the "global_ops", and the files are
70 * created on boot up.
71 */
72 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
73 return 0;
74
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +090075 if (!tr->ops)
76 return -EINVAL;
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -050077
78 ftrace_create_filter_files(tr->ops, parent);
79
80 return 0;
81}
82
83void ftrace_destroy_function_files(struct trace_array *tr)
84{
85 ftrace_destroy_filter_files(tr->ops);
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +090086 ftrace_free_ftrace_ops(tr);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -050087}
88
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -020089static int function_trace_init(struct trace_array *tr)
Steven Rostedt1b29b012008-05-12 21:20:42 +020090{
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -050091 ftrace_func_t func;
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -050092
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -050093 /*
94 * Instance trace_arrays get their ops allocated
95 * at instance creation. Unless it failed
96 * the allocation.
97 */
98 if (!tr->ops)
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -050099 return -ENOMEM;
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -0500100
101 /* Currently only the global instance can do stack tracing */
102 if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
103 func_flags.val & TRACE_FUNC_OPT_STACK)
104 func = function_stack_trace_call;
105 else
106 func = function_trace_call;
107
108 ftrace_init_array_ops(tr, func);
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500109
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -0500110 tr->array_buffer.cpu = get_cpu();
Steven Rostedt26bc83f2008-07-10 20:58:14 -0400111 put_cpu();
112
Steven Rostedt41bc8142008-05-22 11:49:22 -0400113 tracing_start_cmdline_record();
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500114 tracing_start_function_trace(tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +0100115 return 0;
Steven Rostedt1b29b012008-05-12 21:20:42 +0200116}
117
/* Tracer ->reset callback: undo everything function_trace_init() did. */
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}
124
Steven Rostedt90369902008-11-05 16:05:44 -0500125static void function_trace_start(struct trace_array *tr)
126{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -0500127 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt90369902008-11-05 16:05:44 -0500128}
129
Steven Rostedtbb3c3c92009-01-15 20:40:23 -0500130static void
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -0400131function_trace_call(unsigned long ip, unsigned long parent_ip,
Steven Rostedta1e2e312011-08-09 12:50:46 -0400132 struct ftrace_ops *op, struct pt_regs *pt_regs)
Steven Rostedtbb3c3c92009-01-15 20:40:23 -0500133{
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500134 struct trace_array *tr = op->private;
Steven Rostedtbb3c3c92009-01-15 20:40:23 -0500135 struct trace_array_cpu *data;
136 unsigned long flags;
Steven Rostedtd41032a2013-01-24 07:52:34 -0500137 int bit;
Steven Rostedtbb3c3c92009-01-15 20:40:23 -0500138 int cpu;
139 int pc;
140
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500141 if (unlikely(!tr->function_enabled))
Steven Rostedtbb3c3c92009-01-15 20:40:23 -0500142 return;
143
Steven Rostedt897f68a2012-11-02 17:52:35 -0400144 pc = preempt_count();
145 preempt_disable_notrace();
Steven Rostedtbb3c3c92009-01-15 20:40:23 -0500146
Steven Rostedt897f68a2012-11-02 17:52:35 -0400147 bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
148 if (bit < 0)
149 goto out;
150
151 cpu = smp_processor_id();
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -0500152 data = per_cpu_ptr(tr->array_buffer.data, cpu);
Steven Rostedt897f68a2012-11-02 17:52:35 -0400153 if (!atomic_read(&data->disabled)) {
154 local_save_flags(flags);
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -0500155 trace_function(tr, ip, parent_ip, flags, pc);
Steven Rostedtbb3c3c92009-01-15 20:40:23 -0500156 }
Steven Rostedt897f68a2012-11-02 17:52:35 -0400157 trace_clear_recursion(bit);
Steven Rostedtbb3c3c92009-01-15 20:40:23 -0500158
Steven Rostedt897f68a2012-11-02 17:52:35 -0400159 out:
160 preempt_enable_notrace();
Steven Rostedtbb3c3c92009-01-15 20:40:23 -0500161}
162
#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif
180
Steven Rostedtbb3c3c92009-01-15 20:40:23 -0500181static void
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -0400182function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
Steven Rostedta1e2e312011-08-09 12:50:46 -0400183 struct ftrace_ops *op, struct pt_regs *pt_regs)
Steven Rostedt53614992009-01-15 19:12:40 -0500184{
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500185 struct trace_array *tr = op->private;
Steven Rostedt53614992009-01-15 19:12:40 -0500186 struct trace_array_cpu *data;
187 unsigned long flags;
188 long disabled;
189 int cpu;
190 int pc;
191
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500192 if (unlikely(!tr->function_enabled))
Steven Rostedt53614992009-01-15 19:12:40 -0500193 return;
194
195 /*
196 * Need to use raw, since this must be called before the
197 * recursive protection is performed.
198 */
199 local_irq_save(flags);
200 cpu = raw_smp_processor_id();
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -0500201 data = per_cpu_ptr(tr->array_buffer.data, cpu);
Steven Rostedt53614992009-01-15 19:12:40 -0500202 disabled = atomic_inc_return(&data->disabled);
203
204 if (likely(disabled == 1)) {
205 pc = preempt_count();
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -0500206 trace_function(tr, ip, parent_ip, flags, pc);
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -0500207 __trace_stack(tr, flags, STACK_SKIP, pc);
Steven Rostedt53614992009-01-15 19:12:40 -0500208 }
209
210 atomic_dec(&data->disabled);
211 local_irq_restore(flags);
212}
213
Steven Rostedt53614992009-01-15 19:12:40 -0500214static struct tracer_opt func_opts[] = {
215#ifdef CONFIG_STACKTRACE
216 { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
217#endif
218 { } /* Always set a last empty entry */
219};
220
221static struct tracer_flags func_flags = {
222 .val = 0, /* By default: all flags disabled */
223 .opts = func_opts
224};
225
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500226static void tracing_start_function_trace(struct trace_array *tr)
Steven Rostedt3eb36aa2009-01-15 22:21:43 -0500227{
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500228 tr->function_enabled = 0;
229 register_ftrace_function(tr->ops);
230 tr->function_enabled = 1;
Steven Rostedt3eb36aa2009-01-15 22:21:43 -0500231}
232
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500233static void tracing_stop_function_trace(struct trace_array *tr)
Steven Rostedt3eb36aa2009-01-15 22:21:43 -0500234{
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500235 tr->function_enabled = 0;
236 unregister_ftrace_function(tr->ops);
Steven Rostedt3eb36aa2009-01-15 22:21:43 -0500237}
238
Chunyu Hud39cdd22016-03-08 21:37:01 +0800239static struct tracer function_trace;
240
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -0500241static int
242func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
Steven Rostedt53614992009-01-15 19:12:40 -0500243{
Anton Vorontsovf555f122012-07-09 17:10:46 -0700244 switch (bit) {
245 case TRACE_FUNC_OPT_STACK:
Steven Rostedt53614992009-01-15 19:12:40 -0500246 /* do nothing if already set */
247 if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
Anton Vorontsovf555f122012-07-09 17:10:46 -0700248 break;
Steven Rostedt53614992009-01-15 19:12:40 -0500249
Chunyu Hud39cdd22016-03-08 21:37:01 +0800250 /* We can change this flag when not running. */
251 if (tr->current_trace != &function_trace)
252 break;
253
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500254 unregister_ftrace_function(tr->ops);
255
Steven Rostedt3eb36aa2009-01-15 22:21:43 -0500256 if (set) {
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -0500257 tr->ops->func = function_stack_trace_call;
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500258 register_ftrace_function(tr->ops);
Steven Rostedt3eb36aa2009-01-15 22:21:43 -0500259 } else {
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -0500260 tr->ops->func = function_trace_call;
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500261 register_ftrace_function(tr->ops);
Steven Rostedt3eb36aa2009-01-15 22:21:43 -0500262 }
Steven Rostedt53614992009-01-15 19:12:40 -0500263
Anton Vorontsovf555f122012-07-09 17:10:46 -0700264 break;
Anton Vorontsovf555f122012-07-09 17:10:46 -0700265 default:
266 return -EINVAL;
Steven Rostedt53614992009-01-15 19:12:40 -0500267 }
268
Anton Vorontsovf555f122012-07-09 17:10:46 -0700269 return 0;
Steven Rostedt53614992009-01-15 19:12:40 -0500270}
271
Steven Rostedt (Red Hat)8f768992013-07-18 14:41:51 -0400272static struct tracer function_trace __tracer_data =
Steven Rostedt1b29b012008-05-12 21:20:42 +0200273{
Steven Rostedt3eb36aa2009-01-15 22:21:43 -0500274 .name = "function",
275 .init = function_trace_init,
276 .reset = function_trace_reset,
277 .start = function_trace_start,
Steven Rostedt53614992009-01-15 19:12:40 -0500278 .flags = &func_flags,
279 .set_flag = func_set_flag,
Steven Rostedt (Red Hat)f20a5802013-11-07 20:08:58 -0500280 .allow_instances = true,
Steven Rostedt60a11772008-05-12 21:20:44 +0200281#ifdef CONFIG_FTRACE_SELFTEST
Steven Rostedt3eb36aa2009-01-15 22:21:43 -0500282 .selftest = trace_selftest_startup_function,
Steven Rostedt60a11772008-05-12 21:20:44 +0200283#endif
Steven Rostedt1b29b012008-05-12 21:20:42 +0200284};
285
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500286#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400287static void update_traceon_count(struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -0400288 unsigned long ip,
289 struct trace_array *tr, bool on,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400290 void *data)
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500291{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400292 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400293 long *count;
294 long old_count;
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500295
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500296 /*
297 * Tracing gets disabled (or enabled) once per count.
Steven Rostedt (Red Hat)0af26492014-11-20 10:05:36 -0500298 * This function can be called at the same time on multiple CPUs.
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500299 * It is fine if both disable (or enable) tracing, as disabling
300 * (or enabling) the second time doesn't do anything as the
301 * state of the tracer is already disabled (or enabled).
302 * What needs to be synchronized in this case is that the count
303 * only gets decremented once, even if the tracer is disabled
304 * (or enabled) twice, as the second one is really a nop.
305 *
306 * The memory barriers guarantee that we only decrement the
307 * counter once. First the count is read to a local variable
308 * and a read barrier is used to make sure that it is loaded
309 * before checking if the tracer is in the state we want.
310 * If the tracer is not in the state we want, then the count
311 * is guaranteed to be the old count.
312 *
313 * Next the tracer is set to the state we want (disabled or enabled)
314 * then a write memory barrier is used to make sure that
315 * the new state is visible before changing the counter by
316 * one minus the old counter. This guarantees that another CPU
317 * executing this code will see the new state before seeing
Steven Rostedt (Red Hat)0af26492014-11-20 10:05:36 -0500318 * the new counter value, and would not do anything if the new
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500319 * counter is seen.
320 *
321 * Note, there is no synchronization between this and a user
322 * setting the tracing_on file. But we currently don't care
323 * about that.
324 */
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400325 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
326 old_count = *count;
327
328 if (old_count <= 0)
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500329 return;
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500330
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500331 /* Make sure we see count before checking tracing state */
332 smp_rmb();
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500333
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -0400334 if (on == !!tracer_tracing_is_on(tr))
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500335 return;
336
337 if (on)
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -0400338 tracer_tracing_on(tr);
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500339 else
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -0400340 tracer_tracing_off(tr);
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500341
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500342 /* Make sure tracing state is visible before updating count */
343 smp_wmb();
344
345 *count = old_count - 1;
Steven Rostedt (Red Hat)1c317142013-03-09 08:36:53 -0500346}
347
/* Probe callback: "traceon:count" — enable tracing, bounded by count. */
static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}
355
/* Probe callback: "traceoff:count" — disable tracing, bounded by count. */
static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}
363
/* Probe callback: unconditional "traceon" — turn tracing on if it is off. */
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}
374
/* Probe callback: unconditional "traceoff" — turn tracing off if it is on. */
static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}
385
#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif
Steven Rostedte110e3d2009-02-16 23:38:13 -0500407
Steven Rostedt (VMware)dcc19d22017-04-20 11:59:18 -0400408static __always_inline void trace_stack(struct trace_array *tr)
409{
410 unsigned long flags;
411 int pc;
412
413 local_save_flags(flags);
414 pc = preempt_count();
415
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -0500416 __trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
Steven Rostedt (VMware)dcc19d22017-04-20 11:59:18 -0400417}
418
/* Probe callback: unlimited "stacktrace" — dump a stack trace every hit. */
static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}
Steven Rostedt (Red Hat)8380d242013-03-09 08:56:43 -0500426
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400427static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -0400428ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -0400429 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400430 void *data)
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400431{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400432 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400433 long *count;
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500434 long old_count;
435 long new_count;
Steven Rostedt (Red Hat)8380d242013-03-09 08:56:43 -0500436
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400437 if (!tracing_is_on())
438 return;
439
440 /* unlimited? */
441 if (!mapper) {
Steven Rostedt (VMware)dcc19d22017-04-20 11:59:18 -0400442 trace_stack(tr);
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400443 return;
444 }
445
446 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
447
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500448 /*
449 * Stack traces should only execute the number of times the
450 * user specified in the counter.
451 */
452 do {
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500453 old_count = *count;
454
455 if (!old_count)
456 return;
457
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500458 new_count = old_count - 1;
459 new_count = cmpxchg(count, old_count, new_count);
460 if (new_count == old_count)
Steven Rostedt (VMware)dcc19d22017-04-20 11:59:18 -0400461 trace_stack(tr);
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500462
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400463 if (!tracing_is_on())
464 return;
465
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500466 } while (new_count != old_count);
467}
468
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400469static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
470 void *data)
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500471{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400472 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400473 long *count = NULL;
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500474
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400475 if (mapper)
476 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500477
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400478 if (count) {
479 if (*count <= 0)
480 return 0;
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500481 (*count)--;
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400482 }
Steven Rostedt (Red Hat)a9ce7c32014-11-17 23:08:24 -0500483
484 return 1;
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400485}
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500486
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400487static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -0400488ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -0400489 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400490 void *data)
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400491{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400492 if (update_count(ops, ip, data))
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400493 ftrace_dump(DUMP_ALL);
494}
495
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400496/* Only dump the current CPU buffer. */
497static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -0400498ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -0400499 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400500 void *data)
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400501{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400502 if (update_count(ops, ip, data))
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400503 ftrace_dump(DUMP_ORIG);
504}
505
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500506static int
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400507ftrace_probe_print(const char *name, struct seq_file *m,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400508 unsigned long ip, struct ftrace_probe_ops *ops,
509 void *data)
Steven Rostedte110e3d2009-02-16 23:38:13 -0500510{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400511 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400512 long *count = NULL;
Steven Rostedte110e3d2009-02-16 23:38:13 -0500513
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400514 seq_printf(m, "%ps:%s", (void *)ip, name);
Steven Rostedte110e3d2009-02-16 23:38:13 -0500515
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400516 if (mapper)
517 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
518
519 if (count)
520 seq_printf(m, ":count=%ld\n", *count);
Steven Rostedt35ebf1c2009-02-17 13:12:12 -0500521 else
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400522 seq_puts(m, ":unlimited\n");
Steven Rostedte110e3d2009-02-16 23:38:13 -0500523
524 return 0;
525}
526
/* ->print callback for the traceon probes. */
static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}
534
/* ->print callback for the traceoff probes. */
static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}
541
/* ->print callback for the stacktrace probes. */
static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}
548
/* ->print callback for the dump probe. */
static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}
555
/* ->print callback for the cpudump probe. */
static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}
562
563
564static int
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -0400565ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400566 unsigned long ip, void *init_data, void **data)
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400567{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400568 struct ftrace_func_mapper *mapper = *data;
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400569
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400570 if (!mapper) {
571 mapper = allocate_ftrace_func_mapper();
572 if (!mapper)
573 return -ENOMEM;
574 *data = mapper;
575 }
576
577 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400578}
579
580static void
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -0400581ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400582 unsigned long ip, void *data)
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400583{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -0400584 struct ftrace_func_mapper *mapper = data;
585
586 if (!ip) {
587 free_ftrace_func_mapper(mapper, NULL);
588 return;
589 }
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400590
591 ftrace_func_mapper_remove_ip(mapper, ip);
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400592}
593
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400594static struct ftrace_probe_ops traceon_count_probe_ops = {
595 .func = ftrace_traceon_count,
596 .print = ftrace_traceon_print,
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400597 .init = ftrace_count_init,
598 .free = ftrace_count_free,
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400599};
600
601static struct ftrace_probe_ops traceoff_count_probe_ops = {
602 .func = ftrace_traceoff_count,
603 .print = ftrace_traceoff_print,
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400604 .init = ftrace_count_init,
605 .free = ftrace_count_free,
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400606};
607
608static struct ftrace_probe_ops stacktrace_count_probe_ops = {
609 .func = ftrace_stacktrace_count,
610 .print = ftrace_stacktrace_print,
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400611 .init = ftrace_count_init,
612 .free = ftrace_count_free,
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400613};
614
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400615static struct ftrace_probe_ops dump_probe_ops = {
616 .func = ftrace_dump_probe,
617 .print = ftrace_dump_print,
Steven Rostedt (VMware)fe014e22017-04-03 23:22:41 -0400618 .init = ftrace_count_init,
619 .free = ftrace_count_free,
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400620};
621
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400622static struct ftrace_probe_ops cpudump_probe_ops = {
623 .func = ftrace_cpudump_probe,
624 .print = ftrace_cpudump_print,
625};
626
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400627static struct ftrace_probe_ops traceon_probe_ops = {
628 .func = ftrace_traceon,
629 .print = ftrace_traceon_print,
630};
631
632static struct ftrace_probe_ops traceoff_probe_ops = {
633 .func = ftrace_traceoff,
634 .print = ftrace_traceoff_print,
635};
636
/*
 * Probe ops for the uncounted "stacktrace" trigger (selected by
 * ftrace_stacktrace_callback when no :count param is given).
 */
static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func = ftrace_stacktrace,
	.print = ftrace_stacktrace_print,
};
641
642static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400643ftrace_trace_probe_callback(struct trace_array *tr,
644 struct ftrace_probe_ops *ops,
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400645 struct ftrace_hash *hash, char *glob,
646 char *cmd, char *param, int enable)
647{
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500648 void *count = (void *)-1;
649 char *number;
650 int ret;
651
652 /* hash funcs only work with set_ftrace_filter */
653 if (!enable)
654 return -EINVAL;
655
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -0400656 if (glob[0] == '!')
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -0400657 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
Steven Rostedt (Red Hat)8b8fa622013-03-12 09:25:00 -0400658
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500659 if (!param)
660 goto out_reg;
661
662 number = strsep(&param, ":");
663
664 if (!strlen(number))
665 goto out_reg;
666
667 /*
668 * We use the callback data field (which is a pointer)
669 * as our counter.
670 */
Daniel Walterbcd83ea2012-09-26 22:08:38 +0200671 ret = kstrtoul(number, 0, (unsigned long *)&count);
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500672 if (ret)
673 return ret;
674
675 out_reg:
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400676 ret = register_ftrace_function_probe(glob, tr, ops, count);
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500677
Xiao Guangrong04aef322009-07-15 12:29:06 +0800678 return ret < 0 ? ret : 0;
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500679}
680
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400681static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400682ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400683 char *glob, char *cmd, char *param, int enable)
684{
685 struct ftrace_probe_ops *ops;
686
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -0400687 if (!tr)
688 return -ENODEV;
689
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400690 /* we register both traceon and traceoff to this callback */
691 if (strcmp(cmd, "traceon") == 0)
692 ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
693 else
694 ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
695
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400696 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400697 param, enable);
698}
699
700static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400701ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400702 char *glob, char *cmd, char *param, int enable)
703{
704 struct ftrace_probe_ops *ops;
705
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -0400706 if (!tr)
707 return -ENODEV;
708
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400709 ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
710
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400711 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400712 param, enable);
713}
714
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400715static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400716ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400717 char *glob, char *cmd, char *param, int enable)
718{
719 struct ftrace_probe_ops *ops;
720
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -0400721 if (!tr)
722 return -ENODEV;
723
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400724 ops = &dump_probe_ops;
725
726 /* Only dump once. */
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400727 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400728 "1", enable);
729}
730
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400731static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400732ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400733 char *glob, char *cmd, char *param, int enable)
734{
735 struct ftrace_probe_ops *ops;
736
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -0400737 if (!tr)
738 return -ENODEV;
739
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400740 ops = &cpudump_probe_ops;
741
742 /* Only dump once. */
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -0400743 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400744 "1", enable);
745}
746
/* set_ftrace_filter command "<func>:traceon[:count]". */
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name = "traceon",
	.func = ftrace_trace_onoff_callback,
};
751
/* set_ftrace_filter command "<func>:traceoff[:count]". */
static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name = "traceoff",
	.func = ftrace_trace_onoff_callback,
};
756
/* set_ftrace_filter command "<func>:stacktrace[:count]". */
static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name = "stacktrace",
	.func = ftrace_stacktrace_callback,
};
761
/* set_ftrace_filter command "<func>:dump" (one-shot; see callback). */
static struct ftrace_func_command ftrace_dump_cmd = {
	.name = "dump",
	.func = ftrace_dump_callback,
};
766
/* set_ftrace_filter command "<func>:cpudump" (one-shot; see callback). */
static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name = "cpudump",
	.func = ftrace_cpudump_callback,
};
771
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500772static int __init init_func_cmd_traceon(void)
773{
774 int ret;
775
776 ret = register_ftrace_command(&ftrace_traceoff_cmd);
777 if (ret)
778 return ret;
779
780 ret = register_ftrace_command(&ftrace_traceon_cmd);
781 if (ret)
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400782 goto out_free_traceoff;
Steven Rostedt (Red Hat)dd42cd32013-03-13 10:17:50 -0400783
784 ret = register_ftrace_command(&ftrace_stacktrace_cmd);
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400785 if (ret)
786 goto out_free_traceon;
787
788 ret = register_ftrace_command(&ftrace_dump_cmd);
789 if (ret)
790 goto out_free_stacktrace;
791
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400792 ret = register_ftrace_command(&ftrace_cpudump_cmd);
793 if (ret)
794 goto out_free_dump;
795
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400796 return 0;
797
Steven Rostedt (Red Hat)90e3c032013-04-30 19:00:46 -0400798 out_free_dump:
799 unregister_ftrace_command(&ftrace_dump_cmd);
Steven Rostedt (Red Hat)ad71d882013-04-30 15:46:14 -0400800 out_free_stacktrace:
801 unregister_ftrace_command(&ftrace_stacktrace_cmd);
802 out_free_traceon:
803 unregister_ftrace_command(&ftrace_traceon_cmd);
804 out_free_traceoff:
805 unregister_ftrace_command(&ftrace_traceoff_cmd);
806
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500807 return ret;
808}
809#else
/* Without CONFIG_DYNAMIC_FTRACE there are no filter commands to register. */
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
814#endif /* CONFIG_DYNAMIC_FTRACE */
815
Steven Rostedt (VMware)dbeafd02017-03-03 13:48:42 -0500816__init int init_function_trace(void)
Steven Rostedt1b29b012008-05-12 21:20:42 +0200817{
Steven Rostedt23b4ff3a2009-02-14 19:04:24 -0500818 init_func_cmd_traceon();
Steven Rostedt1b29b012008-05-12 21:20:42 +0200819 return register_tracer(&function_trace);
820}