/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}
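
/*
 * Note: the read_barrier_depends() calls above pair with the smp_wmb()
 * in __register_ftrace_function() below, so that a newly registered ops
 * has a valid ->next pointer before other CPUs can see it on ftrace_list.
 */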

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before all CPUs stop calling into the tracer.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;
static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
static unsigned long ftraced_iteration_counter;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};
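
/*
 * These command bits are OR-ed together and handed to
 * ftrace_run_update_code(); e.g. ftrace_force_shutdown() below passes
 * FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC in a single call.
 */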

static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_filter_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000
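
/*
 * Illustrative sizing only (the real numbers depend on the architecture
 * and on sizeof(struct dyn_ftrace)): with 4 KiB pages and records of a
 * few dozen bytes, ENTRIES_PER_PAGE is on the order of a hundred or two
 * records, so ftrace_dyn_table_alloc() pre-allocates roughly
 * NR_TO_INIT / ENTRIES_PER_PAGE pages up front.
 */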

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head(&node->node, &ftrace_hash[key]);
}

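/*
 * Freed records are kept on a simple singly linked free list:
 * ftrace_free_rec() reuses rec->ip as the "next" pointer (and marks the
 * record FTRACE_FL_FREE), and ftrace_alloc_dyn_node() pops from that
 * list before carving a new record out of ftrace_pages.
 */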
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	/* no locking, only called from kstop_machine */

	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	/*
	 * There's a slight race that the ftraced will update the
	 * hash and reset here. If it is already converted, skip it.
	 */
	if (ftrace_ip_converted(ip))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))
#define MCOUNT_ADDR ((long)(mcount))

static void
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip;
	int failed;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		unsigned long fl;
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == 0))
			return;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl == FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable)
			new = ftrace_call_replace(ip, FTRACE_ADDR);
		else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	failed = ftrace_modify_code(ip, old, new);
	if (failed) {
		unsigned long key;
		/* It is possible that the function hasn't been converted yet */
		key = hash_long(ip, FTRACE_HASHBITS);
		if (!ftrace_ip_in_hash(ip, key)) {
			rec->flags |= FTRACE_FL_FAILED;
			ftrace_free_rec(rec);
		}

	}
}

static void ftrace_replace_code(int enable)
{
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int i;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			__ftrace_replace_code(rec, old, new, enable);
		}
	}
}

static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static void
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		ftrace_free_rec(rec);
	}
}

static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	int save_ftrace_enabled;
	cycle_t start, stop;
	int i;

	/* Don't be recording funcs now */
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			ftrace_code_disable(p);
			ftrace_update_cnt++;
		}

	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	ftrace_enabled = save_ftrace_enabled;

	return 0;
}

static void ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled))
		return;

	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}

static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
			ftrace_record_suspend++;
			ftrace_update_code();
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
			ftraced_trigger = 0;
			ftrace_record_suspend--;
		}
		ftraced_iteration_counter++;
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		wake_up_interruptible(&ftraced_waiters);

		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int __init ftrace_dyn_table_alloc(void)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FAILED) ||
		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static void ftrace_filter_reset(void)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_filter_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset();

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = FTRACE_ITER_FILTER;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}

static ssize_t
ftrace_filter_read(struct file *file, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};
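
/*
 * Examples of how ftrace_match() below maps a filter string to a match
 * type: "sys_read" is MATCH_FULL (exact compare), "sys_*" is
 * MATCH_FRONT_ONLY (prefix), "*_read" is MATCH_END_ONLY (suffix) and
 * "*read*" is MATCH_MIDDLE_ONLY (substring).
 */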

static void
ftrace_match(unsigned char *buff, int len)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_filter_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_filter_lock);
	if (reset)
		ftrace_filter_reset();
	if (buf)
		ftrace_match(buf, len);
	mutex_unlock(&ftrace_filter_lock);
}
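
/*
 * Usage sketch (illustrative, not taken from an in-tree caller): a tracer
 * that only cares about scheduler functions could do
 *
 *	unsigned char buf[] = "sched_*";
 *
 *	ftrace_set_filter(buf, sizeof(buf) - 1, 1);
 *
 * which clears any previous filter and then marks every function whose
 * name begins with "sched_" for tracing. The buffer must be writable,
 * since ftrace_match() strips the wildcard in place.
 */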

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_filter_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_filter_lock);
	return 0;
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_filter_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_filter_lseek,
	.release = ftrace_filter_release,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * The ftrace dynamic update daemon only wakes up once a second.
 * There may be cases where an update needs to be done immediately
 * for tests or internal kernel tracing to begin. This function
 * wakes the daemon to do an update and will not return until the
 * update is complete.
 */
int ftrace_force_update(void)
{
	unsigned long last_counter;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftraced_lock);
	last_counter = ftraced_iteration_counter;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&ftraced_waiters, &wait);

	if (unlikely(!ftraced_task)) {
		ret = -ENODEV;
		goto out;
	}

	do {
		mutex_unlock(&ftraced_lock);
		wake_up_process(ftraced_task);
		schedule();
		mutex_lock(&ftraced_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);
	} while (last_counter == ftraced_iteration_counter);

 out:
	mutex_unlock(&ftraced_lock);
	remove_wait_queue(&ftraced_waiters, &wait);
	set_current_state(TASK_RUNNING);

	return ret;
}

static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	task = ftraced_task;
	ftraced_task = NULL;
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	if (task)
		kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

static int __init ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc();
	if (ret)
		goto failed;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = -1;
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;

 failed:
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
# define ftrace_force_shutdown()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something is detected that seems wrong,
 * calling this function keeps ftrace from making any further code
 * modifications or updates. It is used when something has gone wrong.
 */
void ftrace_kill(void)
{
	mutex_lock(&ftrace_sysctl_lock);
	ftrace_disabled = 1;
	ftrace_enabled = 0;

	clear_ftrace_function();
	mutex_unlock(&ftrace_sysctl_lock);

	/* Try to totally disable ftrace */
	ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}