/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);

void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be a brief lag before callers that have
 * already read the old function pointer stop calling it.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;
static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
static unsigned long ftraced_iteration_counter;

enum {
	FTRACE_ENABLE_CALLS = (1 << 0),
	FTRACE_DISABLE_CALLS = (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
	FTRACE_ENABLE_MCOUNT = (1 << 3),
	FTRACE_DISABLE_MCOUNT = (1 << 4),
};

static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_filter_lock);

struct ftrace_page {
	struct ftrace_page *next;
	int index;
	struct dyn_ftrace records[];
} __attribute__((packed));

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT 10000

static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head(&node->node, &ftrace_hash[key]);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	/* no locking, only called from kstop_machine */

	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/* We simply need to protect against recursion */
	__get_cpu_var(ftrace_shutdown_disable_cpu)++;
	if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	/*
	 * There's a slight race that the ftraced will update the
	 * hash and reset here. If it is already converted, skip it.
	 */
	if (ftrace_ip_converted(ip))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	__get_cpu_var(ftrace_shutdown_disable_cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))
#define MCOUNT_ADDR ((long)(mcount))

static void
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip;
	int failed;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		unsigned long fl;
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == 0))
			return;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl == FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable)
			new = ftrace_call_replace(ip, FTRACE_ADDR);
		else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	failed = ftrace_modify_code(ip, old, new);
	if (failed) {
		unsigned long key;
		/* It is possible that the function hasn't been converted yet */
		key = hash_long(ip, FTRACE_HASHBITS);
		if (!ftrace_ip_in_hash(ip, key)) {
			rec->flags |= FTRACE_FL_FAILED;
			ftrace_free_rec(rec);
		}

	}
}

static void ftrace_replace_code(int enable)
{
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int i;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			__ftrace_replace_code(rec, old, new, enable);
		}
	}
}

static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static void
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		ftrace_free_rec(rec);
	}
}

static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t ftrace_update_time;
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	int save_ftrace_enabled;
	cycle_t start, stop;
	int i;

	/* Don't be recording funcs now */
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			ftrace_code_disable(p);
			ftrace_update_cnt++;
		}

	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	ftrace_enabled = save_ftrace_enabled;

	return 0;
}

static void ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled))
		return;

	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}

static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
			ftrace_record_suspend++;
			ftrace_update_code();
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
			ftraced_trigger = 0;
			ftrace_record_suspend--;
		}
		ftraced_iteration_counter++;
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		wake_up_interruptible(&ftraced_waiters);

		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int __init ftrace_dyn_table_alloc(void)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 * final linking to find all calls to ftrace.
	 * Then we can:
	 *  a) know how many pages to allocate.
	 *     and/or
	 *  b) set up the table then.
	 *
	 * The dynamic code is still necessary for
	 * modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER = (1 << 0),
	FTRACE_ITER_CONT = (1 << 1),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t pos;
	struct ftrace_page *pg;
	unsigned idx;
	unsigned flags;
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned buffer_idx;
	unsigned filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FAILED) ||
		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static void ftrace_filter_reset(void)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_filter_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset();

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = FTRACE_ITER_FILTER;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}

static ssize_t
ftrace_filter_read(struct file *file, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}
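
/*
 * Illustrative examples of the wildcard parsing above (not part of the
 * original file): "schedule" stays MATCH_FULL, "sched*" becomes
 * MATCH_FRONT_ONLY on "sched", "*_lock" becomes MATCH_END_ONLY on
 * "_lock", and "*idle*" becomes MATCH_MIDDLE_ONLY on "idle".
 */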

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_filter_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_filter_lock);
	if (reset)
		ftrace_filter_reset();
	if (buf)
		ftrace_match(buf, len);
	mutex_unlock(&ftrace_filter_lock);
}
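
/*
 * Illustrative use only (not part of the original file).  Because
 * ftrace_match() writes a NUL over the '*' while parsing, the buffer
 * passed in must be writable:
 *
 *	unsigned char buf[] = "schedule*";
 *
 *	ftrace_set_filter(buf, sizeof(buf) - 1, 1);
 *
 * This resets any previous filter and then marks every function whose
 * name starts with "schedule" with FTRACE_FL_FILTER.
 */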

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_filter_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_filter_lock);
	return 0;
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_filter_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_filter_lseek,
	.release = ftrace_filter_release,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * The ftrace dynamic update daemon only wakes up once a second.
 * There may be cases where an update needs to be done immediately
 * for tests or internal kernel tracing to begin. This function
 * wakes the daemon to do an update and will not return until the
 * update is complete.
 */
int ftrace_force_update(void)
{
	unsigned long last_counter;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftraced_lock);
	last_counter = ftraced_iteration_counter;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&ftraced_waiters, &wait);

	if (unlikely(!ftraced_task)) {
		ret = -ENODEV;
		goto out;
	}

	do {
		mutex_unlock(&ftraced_lock);
		wake_up_process(ftraced_task);
		schedule();
		mutex_lock(&ftraced_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);
	} while (last_counter == ftraced_iteration_counter);

 out:
	mutex_unlock(&ftraced_lock);
	remove_wait_queue(&ftraced_waiters, &wait);
	set_current_state(TASK_RUNNING);

	return ret;
}
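
/*
 * Illustrative use only (not part of the original file): a tracer
 * self-test that is about to set a filter can first force the daemon
 * to convert any freshly recorded mcount call sites:
 *
 *	ret = ftrace_force_update();
 *	if (ret)
 *		return ret;
 *
 * A non-zero return means the daemon is not running (-ENODEV) or the
 * wait was interrupted by a signal (-EINTR).
 */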

static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	task = ftraced_task;
	ftraced_task = NULL;
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	if (task)
		kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

static int __init ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc();
	if (ret)
		goto failed;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = -1;
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;

 failed:
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup() do { } while (0)
# define ftrace_shutdown() do { } while (0)
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
# define ftrace_force_shutdown() do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from doing any
 * more modifications or updates. It is used when something has
 * gone wrong.
 */
void ftrace_kill(void)
{
	mutex_lock(&ftrace_sysctl_lock);
	ftrace_disabled = 1;
	ftrace_enabled = 0;

	clear_ftrace_function();
	mutex_unlock(&ftrace_sysctl_lock);

	/* Try to totally disable ftrace */
	ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
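
/*
 * Illustrative use only (not part of the original file); "my_ops" and
 * "my_func" are made-up names.  The callback must be notrace so that
 * tracing the handler does not recurse:
 *
 *	static void notrace my_func(unsigned long ip,
 *				    unsigned long parent_ip)
 *	{
 *		(record ip/parent_ip, bump a counter, ...)
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */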

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}