/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

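/*
 * Called when the "function" tracer is selected: save the trace_array,
 * note the current CPU, then enable cmdline recording and the
 * function-entry callbacks.
 */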
static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}

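/*
 * Function entry callback used when the preempt-only iterator flag
 * (TRACE_ITER_PREEMPTONLY) is set: only preemption is disabled around
 * the record, rather than hard-disabling interrupts.  The per-cpu
 * "disabled" counter keeps a nested entry from being logged twice.
 */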
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};

static struct tracer_flags func_flags;

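/*
 * Default function entry callback: interrupts are disabled around the
 * record and the per-cpu "disabled" counter guards against recursion.
 */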
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

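/*
 * Same as function_trace_call(), but also records a stack trace of the
 * call site when the func_stack_trace option is enabled.
 */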
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

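/*
 * Register the callback that matches the current options with ftrace.
 * ftrace_function_enabled additionally gates the callbacks themselves,
 * keeping them quiet while the tracer is switched on or off.
 */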
static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		unregister_ftrace_function(&trace_stack_ops);
	else
		unregister_ftrace_function(&trace_ops);
}

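/*
 * Toggle the func_stack_trace option while the tracer is running by
 * swapping between the plain and the stack-tracing ftrace_ops.
 */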
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __read_mostly =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
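/*
 * Probe callbacks for the "traceon"/"traceoff" triggers set through
 * set_ftrace_filter.  The probe data acts as an optional countdown:
 * -1 means fire forever, otherwise it is decremented until it hits zero.
 */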
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (!tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_off();
}

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
	.func	= ftrace_traceon,
	.print	= ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func	= ftrace_traceoff,
	.print	= ftrace_trace_onoff_print,
};

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	if (ops == &traceon_probe_ops)
		seq_printf(m, "traceon");
	else
		seq_printf(m, "traceoff");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	unregister_ftrace_function_probe_func(glob, ops);

	return 0;
}

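/*
 * Parse "<func>:traceon[:count]" or "<func>:traceoff[:count]" written to
 * set_ftrace_filter and register the matching probe; a leading '!'
 * unregisters it instead.
 */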
static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return ftrace_trace_onoff_unreg(glob+1, cmd, param);

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name	= "traceon",
	.func	= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name	= "traceoff",
	.func	= ftrace_trace_onoff_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		unregister_ftrace_command(&ftrace_traceoff_cmd);
	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
core_initcall(init_function_trace);