/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
Steven Rostedt1b29b012008-05-12 21:20:42 +020012#include <linux/debugfs.h>
13#include <linux/uaccess.h>
14#include <linux/ftrace.h>
Ingo Molnar2e0f5762008-05-12 21:20:49 +020015#include <linux/fs.h>
Steven Rostedt1b29b012008-05-12 21:20:42 +020016
17#include "trace.h"
18
/*
 * The trace_array registered via function_trace_init(); stashed here so
 * the stack-trace callback (which gets no tr argument) can reach it.
 */
static struct trace_array *func_trace;

/*
 * Enable function tracing on @tr: clear the per-cpu ring buffers, then
 * turn on cmdline recording and the function trace hook.
 */
static void start_function_trace(struct trace_array *tr)
{
	/* get_cpu() also disables preemption while the buffers are reset */
	tr->cpu = get_cpu();
	tracing_reset_online_cpus(tr);
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
}
30
/*
 * Disable function tracing: stop the trace hook first, then cmdline
 * recording — the reverse order of start_function_trace().
 */
static void stop_function_trace(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}
36
/*
 * Tracer ->init callback: remember @tr for function_stack_trace_call()
 * and start tracing.  Always succeeds (returns 0).
 */
static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	start_function_trace(tr);
	return 0;
}
43
/* Tracer ->reset callback: tear down what function_trace_init() set up. */
static void function_trace_reset(struct trace_array *tr)
{
	stop_function_trace(tr);
}
48
/* Tracer ->start callback: discard stale data by resetting the buffers. */
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}
53
/*
 * Function trace hook used when the "func_stack_trace" option is set:
 * records a stack trace for every traced function entry.
 *
 * Runs directly from the ftrace call site, before any of the usual
 * recursion protection — hence the raw irq-disable and the per-cpu
 * "disabled" counter guarding against re-entry on this CPU.
 */
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	/* Nothing to do if function tracing is globally off */
	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	/* Record only when this CPU is not already inside the tracer */
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		/*
		 * skip over 5 funcs:
		 *  __ftrace_trace_stack,
		 *  __trace_stack,
		 *  function_stack_trace_call
		 *  ftrace_list_func
		 *  ftrace_call
		 */
		__trace_stack(tr, data, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
92
/*
 * ftrace_ops registered/unregistered by func_set_flag() when the
 * func_stack_trace option is toggled.
 */
static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
};
97
/* Our two options */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,	/* also record a stack trace per entry */
};

/* Options exposed in the tracer's "trace_options" file */
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
114
115static int func_set_flag(u32 old_flags, u32 bit, int set)
116{
117 if (bit == TRACE_FUNC_OPT_STACK) {
118 /* do nothing if already set */
119 if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
120 return 0;
121
122 if (set)
123 register_ftrace_function(&trace_stack_ops);
124 else
125 unregister_ftrace_function(&trace_stack_ops);
126
127 return 0;
128 }
129
130 return -EINVAL;
131}
132
/* The "function" tracer, registered with the tracing core at boot */
static struct tracer function_trace __read_mostly =
{
	.name	     = "function",
	.init	     = function_trace_init,
	.reset	     = function_trace_reset,
	.start	     = function_trace_start,
	.flags	     = &func_flags,
	.set_flag    = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_function,
#endif
};
145
/* Register the function tracer with the tracing core at device initcall time */
static __init int init_function_trace(void)
{
	return register_tracer(&function_trace);
}

device_initcall(init_function_trace);