/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

#define TRACE_BUFFER_FLAGS	(RB_FL_OVERWRITE)

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
int ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk, could
 * occur at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 and will be set to zero if the initialization
 * of the tracer is successful; that is the only place it is cleared.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);

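/*
 * ftrace_cpu_disabled is a per-CPU counter checked on the tracing
 * fast paths (see trace_function()); while it is non-zero on a CPU,
 * events from that CPU are dropped.  Disabling preemption keeps us
 * on the CPU whose counter was incremented.
 */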
static inline void ftrace_disable_cpu(void)
{
	preempt_disable();
	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
}

static inline void ftrace_enable_cpu(void)
{
	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
	preempt_enable();
}

static cpumask_var_t __read_mostly	tracing_buffer_mask;

/* Define which cpu buffers are currently read in trace_pipe */
static cpumask_var_t			tracing_reader_cpumask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * variable is set, then ftrace_dump is called. This will output the
 * contents of the ftrace buffers to the console.  This is very useful
 * for capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops to true.
 */
int ftrace_dump_on_oops;

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static int __init set_ftrace(char *str)
{
	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = 1;
	return 1;
}
__setup("ftrace=", set_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	ftrace_dump_on_oops = 1;
	return 1;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

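/* Convert nanoseconds to microseconds, rounding to the nearest usec */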
unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

int filter_current_check_discard(struct ring_buffer *buffer,
				 struct ftrace_event_call *call, void *rec,
				 struct ring_buffer_event *event)
{
	return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);

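/*
 * Return the current, normalized ring-buffer timestamp for @cpu.
 * Falls back to trace_clock_local() early in boot, before the
 * global buffer has been allocated.
 */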
cycle_t ftrace_now(int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!global_trace.buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
	ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);

	return ts;
}

/*
 * The max_tr is used to snapshot the global_trace when a maximum
 * latency is reached. Some tracers will use this to store a maximum
 * trace while it continues examining live traces.
 *
 * The buffers for the max_tr are set up the same as the global_trace.
 * When a snapshot is taken, the linked list of the max_tr is swapped
 * with the linked list of the global_trace and the buffers are reset
 * for the global_trace so the tracing can continue.
 */
static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

/* tracer_enabled is used to toggle activation of a tracer */
static int			tracer_enabled = 1;

/**
 * tracing_is_enabled - return tracer_enabled status
 *
 * This function is used by other tracers to know the status
 * of the tracer_enabled flag.  Tracers may use this function
 * to know if they should enable their features when starting
 * up. See irqsoff tracer for an example (start_irqsoff_tracer).
 */
int tracing_is_enabled(void)
{
	return tracer_enabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway, this is
 * configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/* current_trace points to the tracer that is currently active */
static struct tracer		*current_trace __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 * This lock is also used to keep user access serialized.
 * Accesses from userspace will grab this lock while userspace
 * activities happen inside the kernel.
 */
static DEFINE_MUTEX(trace_types_lock);

/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME;

static int trace_stop_count;
static DEFINE_SPINLOCK(tracing_start_lock);

/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * Simply wakes up any task that is blocked on the trace_wait
 * queue. This is used with trace_poll for tasks polling the trace.
 */
void trace_wake_up(void)
{
	/*
	 * The runqueue_is_locked() check is racy, but this is the best
	 * we have for now:
	 */
	if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
		wake_up(&trace_wait);
}

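/*
 * Parse the trace_buf_size= boot option; memparse() accepts the
 * usual K, M and G suffixes.
 */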
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"sched-tree",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
} trace_clocks[] = {
	{ trace_clock_local,	"local" },
	{ trace_clock_global,	"global" },
};

int trace_clock_id;

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

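/*
 * Copy up to @cnt unread bytes of @s to the user buffer @ubuf,
 * advancing s->readpos.  Returns the number of bytes copied,
 * -EBUSY when the sequence has been fully consumed, or -EFAULT
 * if nothing could be copied.
 */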
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

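/* In-kernel counterpart of trace_seq_to_user(): copy into a kernel buffer */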
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;
	void *ret;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = memcpy(buf, s->buffer + s->readpos, cnt);
	if (!ret)
		return -EFAULT;

	s->readpos += cnt;
	return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as a raw_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside the update_max_tr
 * so it needs to be defined outside of the
 * CONFIG_TRACER_MAX_TRACE.
 */
static raw_spinlock_t ftrace_max_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly	tracing_max_latency;
unsigned long __read_mostly	tracing_thresh;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];
	struct trace_array_cpu *max_data;

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	max_data = max_tr.data[cpu];
	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	max_data->uid = task_uid(tsk);
	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf = tr->buffer;

	if (trace_stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	__raw_spin_lock(&ftrace_max_lock);

	tr->buffer = max_tr.buffer;
	max_tr.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	__raw_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (trace_stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	__raw_spin_lock(&ftrace_max_lock);

	ftrace_disable_cpu();

	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk(&max_tr, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	ftrace_enable_cpu();

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	__raw_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
__releases(kernel_lock)
__acquires(kernel_lock)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) > MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	/*
	 * When this gets called we hold the BKL which means that
	 * preemption is disabled. Various trace selftests however
	 * need to disable and enable preemption for successful tests.
	 * So we drop the BKL here and grab it after the tests again.
	 */
	unlock_kernel();
	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest && !tracing_selftest_disabled) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array *tr = &global_trace;

		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		tracing_reset_online_cpus(tr);

		current_trace = type;
		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		tracing_reset_online_cpus(tr);

		printk(KERN_CONT "PASSED\n");
	}
#endif

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this one will break them */
	tracing_selftest_disabled = 1;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	lock_kernel();
	return ret;
}

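/**
 * unregister_tracer - remove a tracer from the tracer list
 * @type: the plugin to remove
 *
 * If @type is the tracer currently running, tracing is stopped
 * and the nop tracer is installed in its place.
 */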
void unregister_tracer(struct tracer *type)
{
	struct tracer **t;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Tracer %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;

	if (type == current_trace && tracer_enabled) {
		tracer_enabled = 0;
		tracing_stop();
		if (current_trace->stop)
			current_trace->stop(&global_trace);
		current_trace = &nop_trace;
	}
out:
	mutex_unlock(&trace_types_lock);
}

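/*
 * Reset a single CPU buffer.  The ftrace_disable_cpu()/ftrace_enable_cpu()
 * pair keeps this context from tracing into the buffer while it is reset.
 */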
static void __tracing_reset(struct trace_array *tr, int cpu)
{
	ftrace_disable_cpu();
	ring_buffer_reset_cpu(tr->buffer, cpu);
	ftrace_enable_cpu();
}

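/*
 * Reset one CPU buffer, first disabling recording and waiting for
 * any in-flight commits to finish.
 */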
void tracing_reset(struct trace_array *tr, int cpu)
{
	struct ring_buffer *buffer = tr->buffer;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	__tracing_reset(tr, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_array *tr)
{
	struct ring_buffer *buffer = tr->buffer;
	int cpu;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		__tracing_reset(tr, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_current(int cpu)
{
	tracing_reset(&global_trace, cpu);
}

void tracing_reset_current_online_cpus(void)
{
	tracing_reset_online_cpus(&global_trace);
}

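/*
 * The saved_cmdlines array caches the comm (task name) of recently
 * traced pids, so trace output can map a pid back to a name without
 * holding a reference to the task.
 */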
#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
	return trace_stop_count;
}

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected. This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
	tracing_disabled = 1;
	ftrace_stop();
	tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	spin_lock_irqsave(&tracing_start_lock, flags);
	if (--trace_stop_count) {
		if (trace_stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			trace_stop_count = 0;
		}
		goto out;
	}

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	ftrace_start();
 out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	spin_lock_irqsave(&tracing_start_lock, flags);
	if (trace_stop_count++)
		goto out;

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}

void trace_stop_cmdline_recording(void);

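/*
 * Save tsk->comm in the saved_cmdlines ring.  When a slot is reused,
 * the old pid's mapping is cleared so a stale comm is never reported.
 */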
static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!__raw_spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		map_cmdline_to_pid[idx] = tsk->pid;
		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	__raw_spin_unlock(&trace_cmdline_lock);
}

void trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	preempt_disable();
	__raw_spin_lock(&trace_cmdline_lock);
	map = map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, saved_cmdlines[map]);
	else
		strcpy(comm, "<...>");

	__raw_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
	    !tracing_is_on())
		return;

	trace_save_cmdline(tsk);
}

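/*
 * Fill in the fields common to all trace entries: pid, lock depth,
 * preempt count, and the irq/softirq/need-resched flags of the
 * current context.
 */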
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->lock_depth = (tsk) ? tsk->lock_depth : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

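/*
 * Reserve space for an event of @type on @buffer and pre-fill the
 * generic fields.  Returns NULL when the reservation fails.
 */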
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

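/*
 * Commit a reserved event, record the optional kernel and user stack
 * traces, and wake up any readers when @wake is set.
 */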
static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc,
			     int wake)
{
	ring_buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);

	if (wake)
		trace_wake_up();
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
}

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
				       struct ring_buffer_event *event,
				       unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
}
EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

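/*
 * Record a function-entry (TRACE_FN) event, unless a ring-buffer
 * reader has temporarily disabled tracing on this CPU.
 */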
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);
}

void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
	if (likely(!atomic_read(&data->disabled)))
		trace_function(tr, ip, parent_ip, flags, pc);
}

#ifdef CONFIG_STACKTRACE
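/*
 * Record a kernel stack trace (TRACE_STACK) into @buffer, skipping
 * the first @skip frames so the unwinder starts at the caller.
 */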
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = skip;
	trace.entries = entry->caller;

	save_stack_trace(&trace);
	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);
}

void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->buffer, flags, skip, pc);
}

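/*
 * Record the current user-space stack (TRACE_USER_STACK), but only
 * when the userstacktrace option is enabled.
 */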
void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = 0;
	trace.entries = entry->caller;

	save_stack_trace_user(&trace);
	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	/* pass the buffer, matching the current ftrace_trace_userstack() */
	ftrace_trace_userstack(tr->buffer, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */

static void
ftrace_trace_special(void *__tr,
		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
		     int pc)
{
	struct ftrace_event_call *call = &event_special;
	struct ring_buffer_event *event;
	struct trace_array *tr = __tr;
	struct ring_buffer *buffer = tr->buffer;
	struct special_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL,
					  sizeof(*entry), 0, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->arg1 = arg1;
	entry->arg2 = arg2;
	entry->arg3 = arg3;

	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, 0, pc);
}

void
__trace_special(void *__tr, void *__data,
		unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
}

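/*
 * Record a TRACE_SPECIAL event from any context.  The per-cpu
 * disabled counter guards against recursion.
 */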
void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (tracing_disabled)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (likely(atomic_inc_return(&data->disabled) == 1))
		ftrace_trace_special(tr, arg1, arg2, arg3, pc);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

/**
 * trace_vbprintk - write binary msg to tracing buffer
 * @ip: the instruction pointer of the caller
 * @fmt: the printk format used to decode the binary arguments later
 * @args: the argument list to encode after @fmt
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	static raw_spinlock_t trace_buf_lock =
		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
	static u32 trace_buf[TRACE_BUF_SIZE];

	struct ftrace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	struct bprint_entry *entry;
	unsigned long flags;
	int disable;
	int resched;
	int cpu, len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	disable = atomic_inc_return(&data->disabled);
	if (unlikely(disable != 1))
		goto out;

	/* Lockdep uses trace_printk for lock tracing */
	local_irq_save(flags);
	__raw_spin_lock(&trace_buf_lock);
	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);

	if (len > TRACE_BUF_SIZE || len < 0)
		goto out_unlock;

	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out_unlock;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);

out_unlock:
	__raw_spin_unlock(&trace_buf_lock);
	local_irq_restore(flags);

out:
	atomic_dec_return(&data->disabled);
	ftrace_preempt_enable(resched);
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
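
/*
 * A minimal usage sketch (hypothetical helper, not part of this file):
 * a varargs front end that funnels into trace_vbprintk(). The real
 * entry point is the trace_printk() machinery, which also records the
 * format string so the arguments can be decoded at read time.
 */
static inline int example_bprintk(unsigned long ip, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = trace_vbprintk(ip, fmt, ap);
	va_end(ap);

	return ret;
}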

int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}
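
/*
 * Illustrative call (hypothetical caller): _THIS_IP_ from
 * <linux/kernel.h> records the call site so the output can resolve it
 * through kallsyms. The write is a no-op unless the "printk" trace
 * option is set.
 */
static inline void example_trace_note(int value)
{
	trace_array_printk(&global_trace, _THIS_IP_,
			   "example note: value=%d\n", value);
}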

int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
	static char trace_buf[TRACE_BUF_SIZE];

	struct ftrace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array_cpu *data;
	int cpu, len = 0, size, pc;
	struct print_entry *entry;
	unsigned long irq_flags;
	int disable;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	disable = atomic_inc_return(&data->disabled);
	if (unlikely(disable != 1))
		goto out;

	pause_graph_tracing();
	raw_local_irq_save(irq_flags);
	__raw_spin_lock(&trace_buf_lock);
	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);

	len = min(len, TRACE_BUF_SIZE - 1);
	trace_buf[len] = 0;

	size = sizeof(*entry) + len + 1;
	buffer = tr->buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, pc);
	if (!event)
		goto out_unlock;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, trace_buf, len);
	entry->buf[len] = 0;
	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);

 out_unlock:
	__raw_spin_unlock(&trace_buf_lock);
	raw_local_irq_restore(irq_flags);
	unpause_graph_tracing();
 out:
	atomic_dec_return(&data->disabled);
	preempt_enable_notrace();

	return len;
}
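
/*
 * Note the difference from trace_vbprintk() above: a TRACE_PRINT event
 * stores the fully formatted string, while a TRACE_BPRINT event stores
 * only the format pointer plus the binary arguments and defers the
 * formatting until the buffer is read.
 */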

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);

enum trace_file_type {
	TRACE_FILE_LAT_FMT = 1,
	TRACE_FILE_ANNOTATE = 2,
};

static void trace_iterator_increment(struct trace_iterator *iter)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	iter->idx++;
	if (iter->buffer_iter[iter->cpu])
		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);

	ftrace_enable_cpu();
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];

	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->tr->buffer, cpu, ts);

	ftrace_enable_cpu();

	return event ? ring_buffer_event_data(event) : NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->tr->buffer;
	struct trace_entry *ent, *next = NULL;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother iterating
	 * over all the cpus; peek at the requested cpu directly.
	 */
	if (cpu_file > TRACE_PIPE_ALL_CPU) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	return next;
}

/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
static void *find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}
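
/*
 * A minimal sketch (hypothetical, kernel-internal) of how the helpers
 * above drain a trace in global timestamp order; the real consumers
 * are the seq_file callbacks below and the trace_pipe reader.
 */
static inline void example_walk_entries(struct trace_iterator *iter)
{
	while (find_next_entry_inc(iter)) {
		/* iter->ent, iter->cpu and iter->ts describe the entry */
	}
}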

static void trace_consume(struct trace_iterator *iter)
{
	/* Don't allow ftrace to trace into the ring buffers */
	ftrace_disable_cpu();
	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
	ftrace_enable_cpu();
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct trace_array *tr = iter->tr;
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	tr->data[cpu]->skipped_entries = 0;

	if (!iter->buffer_iter[cpu])
		return;

	buf_iter = iter->buffer_iter[cpu];
	ring_buffer_iter_reset(buf_iter);

	/*
	 * With the max latency tracers, a reset may never have taken
	 * place on a cpu. This shows up as events whose timestamps
	 * predate the start of the buffer, so skip them here.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->tr->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	tr->data[cpu]->skipped_entries = entries;
}

/*
 * No locking is necessary here. The worst that can happen is
 * losing events consumed at the same time by a trace_pipe reader.
 * Other than that, we don't risk crashing the ring buffer
 * because it serializes the readers.
 *
 * The current tracer is copied to avoid taking a global lock
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	static struct tracer *old_tracer;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(old_tracer != current_trace && current_trace)) {
		old_tracer = current_trace;
		*iter->trace = *current_trace;
	}
	mutex_unlock(&trace_types_lock);

	atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		ftrace_disable_cpu();

		if (cpu_file == TRACE_PIPE_ALL_CPU) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		ftrace_enable_cpu();

		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		l = *pos - 1;
		p = s_next(m, p, &l);
	}

	trace_event_read_lock();
	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	atomic_dec(&trace_record_cmdline_disabled);
	trace_event_read_unlock();
}

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n");
	seq_puts(m, "#                 / _-----=> irqs-off        \n");
	seq_puts(m, "#                | / _----=> need-resched    \n");
	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#                |||| /_--=> lock-depth      \n");
	seq_puts(m, "#                |||||/     delay            \n");
	seq_puts(m, "#  cmd     pid   |||||| time  |   caller     \n");
	seq_puts(m, "#     \\   /      ||||||   \\   |   /          \n");
}

static void print_func_help_header(struct seq_file *m)
{
	seq_puts(m, "#           TASK-PID    CPU#    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}

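/*
 * For reference, a line of the default format described by
 * print_func_help_header() looks like this (hypothetical sample):
 *
 *	    bash-2794  [000]  1520.123456: do_sys_open <-sys_open
 */
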
static void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long entries = 0;
	unsigned long total = 0;
	unsigned long count;
	const char *name = "preemption";
	int cpu;

	if (type)
		name = type->name;


	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(tr->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (tr->data[cpu]->skipped_entries) {
			count -= tr->data[cpu]->skipped_entries;
			/* total is the same as the entries */
			total += count;
		} else
			total += count +
				ring_buffer_overrun_cpu(tr->buffer, cpu);
		entries += count;
	}

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (iter->tr->data[iter->cpu]->skipped_entries)
		return;

	cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			if (!trace_print_lat_context(iter))
				goto partial;
		} else {
			if (!trace_print_context(iter))
				goto partial;
		}
	}

	if (event)
		return event->trace(iter, sym_flags);

	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (!trace_seq_printf(s, "%d %d %llu ",
				      entry->pid, iter->cpu, iter->ts))
			goto partial;
	}

	event = ftrace_find_event(entry->type);
	if (event)
		return event->raw(iter, 0);

	if (!trace_seq_printf(s, "%d ?\n", entry->type))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
		SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->hex(iter, 0);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD_RET(s, newline);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD_RET(s, entry->pid);
		SEQ_PUT_FIELD_RET(s, iter->cpu);
		SEQ_PUT_FIELD_RET(s, iter->ts);
	}

	event = ftrace_find_event(entry->type);
	return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
}

static int trace_empty(struct trace_iterator *iter)
{
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
		cpu = iter->cpu_file;
		if (iter->buffer_iter[cpu]) {
			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu]) {
			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

/* Called with trace_event_read_lock() held. */
static enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
		}
		if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))
				return 0;
			print_trace_header(m, iter);
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_lat_help_header(m);
		} else {
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_func_help_header(m);
		}
	} else {
		print_trace_line(iter);
		trace_print_seq(m, &iter->seq);
	}

	return 0;
}

static struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

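/*
 * The seq_file core drives the callbacks above roughly like this
 * (sketch, simplified from fs/seq_file.c):
 *
 *	p = start(m, &pos);
 *	while (p) {
 *		show(m, p);
 *		p = next(m, p, &pos);
 *	}
 *	stop(m, p);
 */
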
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file)
{
	long cpu_file = (long) inode->i_private;
	void *fail_ret = ERR_PTR(-ENOMEM);
	struct trace_iterator *iter;
	struct seq_file *m;
	int cpu, ret;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	if (current_trace)
		*iter->trace = *current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	cpumask_clear(iter->started);

	if (current_trace && current_trace->print_max)
		iter->tr = &max_tr;
	else
		iter->tr = &global_trace;
	iter->pos = -1;
	mutex_init(&iter->mutex);
	iter->cpu_file = cpu_file;

	/* Notify the tracer early, before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->tr->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* stop the trace while dumping */
	tracing_stop();

	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
		for_each_tracing_cpu(cpu) {

			iter->buffer_iter[cpu] =
				ring_buffer_read_start(iter->tr->buffer, cpu);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_start(iter->tr->buffer, cpu);
		tracing_iter_reset(iter, cpu);
	}

	ret = seq_open(file, &tracer_seq_ops);
	if (ret < 0) {
		fail_ret = ERR_PTR(ret);
		goto fail_buffer;
	}

	m = file->private_data;
	m->private = iter;

	mutex_unlock(&trace_types_lock);

	return iter;

 fail_buffer:
	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}
	free_cpumask_var(iter->started);
	tracing_start();
 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter);

	return fail_ret;
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ))
		return 0;

	iter = m->private;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	/* reenable tracing if it was previously enabled */
	tracing_start();
	mutex_unlock(&trace_types_lock);

	seq_release(inode, file);
	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter);
	return 0;
}

static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter;
	int ret = 0;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		long cpu = (long) inode->i_private;

		if (cpu == TRACE_PIPE_ALL_CPU)
			tracing_reset_online_cpus(&global_trace);
		else
			tracing_reset(&global_trace, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}
	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = t->next;

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(file, &show_traces_seq_ops);
}

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= seq_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
};

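/*
 * Shell-level consequence of the open/write hooks above (illustrative):
 * opening "trace" for writing with O_TRUNC erases the buffer, so
 *
 *	# echo > /sys/kernel/debug/tracing/trace
 *
 * clears the trace without touching any other state.
 */
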
/*
 * Only trace on a CPU if the bitmask is set:
 */
static cpumask_var_t tracing_cpumask;

/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
	if (count - len < 2) {
		count = -EINVAL;
		goto out_err;
	}
	len += sprintf(mask_str + len, "\n");
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	int err, cpu;
	cpumask_var_t tracing_cpumask_new;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	__raw_spin_lock(&ftrace_max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
		    !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&global_trace.data[cpu]->disabled);
		}
		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
		    cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&global_trace.data[cpu]->disabled);
		}
	}
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_enable();

	cpumask_copy(tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
};

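/*
 * Illustrative shell usage (hypothetical values): the mask is read and
 * written as hex, so on a 4-cpu box
 *
 *	# cat /sys/kernel/debug/tracing/tracing_cpumask
 *	f
 *	# echo 3 > /sys/kernel/debug/tracing/tracing_cpumask
 *
 * restricts tracing to cpus 0 and 1.
 */
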
static ssize_t
tracing_trace_options_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct tracer_opt *trace_opts;
	u32 tracer_flags;
	int len = 0;
	char *buf;
	int r = 0;
	int i;


	/* calculate max size */
	for (i = 0; trace_options[i]; i++) {
		len += strlen(trace_options[i]);
		len += 3; /* "no" and newline */
	}

	mutex_lock(&trace_types_lock);
	tracer_flags = current_trace->flags->val;
	trace_opts = current_trace->flags->opts;

	/*
	 * Increase the size with names of options specific
	 * of the current tracer.
	 */
	for (i = 0; trace_opts[i].name; i++) {
		len += strlen(trace_opts[i].name);
		len += 3; /* "no" and newline */
	}

	/* +1 for \0 */
	buf = kmalloc(len + 1, GFP_KERNEL);
	if (!buf) {
		mutex_unlock(&trace_types_lock);
		return -ENOMEM;
	}

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			r += sprintf(buf + r, "%s\n", trace_options[i]);
		else
			r += sprintf(buf + r, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			r += sprintf(buf + r, "%s\n",
				     trace_opts[i].name);
		else
			r += sprintf(buf + r, "no%s\n",
				     trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	WARN_ON(r >= len + 1);

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	kfree(buf);
	return r;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
{
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int ret = 0, i = 0;
	int len;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];
		len = strlen(opts->name);

		if (strncmp(cmp, opts->name, len) == 0) {
			ret = trace->set_flag(tracer_flags->val,
					      opts->bit, !neg);
			break;
		}
	}
	/* Not found */
	if (!tracer_flags->opts[i].name)
		return -EINVAL;

	/* Refused to handle */
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;

	return 0;
}

static void set_tracer_flags(unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(trace_flags & mask) == !!enabled)
		return;

	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;
}

static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp = buf;
	int neg = 0;
	int ret;
	int i;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (strncmp(buf, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	for (i = 0; trace_options[i]; i++) {
		int len = strlen(trace_options[i]);

		if (strncmp(cmp, trace_options[i], len) == 0) {
			set_tracer_flags(1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i]) {
		mutex_lock(&trace_types_lock);
		ret = set_tracer_option(current_trace, cmp, neg);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}

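/*
 * Parsing example for the function above (hypothetical input): writing
 * "nosym-addr" arrives with buf = "nosym-addr"; the leading "no" sets
 * neg = 1 and leaves cmp pointing at "sym-addr", so the matching flag
 * bit is cleared via set_tracer_flags().
 */
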
static const struct file_operations tracing_iter_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_trace_options_read,
	.write		= tracing_trace_options_write,
};

static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# mount -t debugfs nodev /sys/kernel/debug\n\n"
	"# cat /sys/kernel/debug/tracing/available_tracers\n"
	"wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
	"# cat /sys/kernel/debug/tracing/current_tracer\n"
	"nop\n"
	"# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n"
	"# cat /sys/kernel/debug/tracing/current_tracer\n"
	"sched_switch\n"
	"# cat /sys/kernel/debug/tracing/trace_options\n"
	"noprint-parent nosym-offset nosym-addr noverbose\n"
	"# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
	"# echo 1 > /sys/kernel/debug/tracing/tracing_enabled\n"
	"# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
	"# echo 0 > /sys/kernel/debug/tracing/tracing_enabled\n"
;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
};

static ssize_t
tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	char *buf_comm;
	char *file_buf;
	char *buf;
	int len = 0;
	int pid;
	int i;

	file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
	if (!file_buf)
		return -ENOMEM;

	buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
	if (!buf_comm) {
		kfree(file_buf);
		return -ENOMEM;
	}

	buf = file_buf;

	for (i = 0; i < SAVED_CMDLINES; i++) {
		int r;

		pid = map_cmdline_to_pid[i];
		if (pid == -1 || pid == NO_CMDLINE_MAP)
			continue;

		trace_find_cmdline(pid, buf_comm);
		r = sprintf(buf, "%d %s\n", pid, buf_comm);
		buf += r;
		len += r;
	}

	len = simple_read_from_buffer(ubuf, cnt, ppos,
				      file_buf, len);

	kfree(file_buf);
	kfree(buf_comm);

	return len;
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_read,
};

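/*
 * Illustrative output (hypothetical pids and comms):
 *
 *	# cat /sys/kernel/debug/tracing/saved_cmdlines
 *	2794 bash
 *	3021 sshd
 */
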
2531static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002532tracing_ctrl_read(struct file *filp, char __user *ubuf,
2533 size_t cnt, loff_t *ppos)
2534{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002535 char buf[64];
2536 int r;
2537
Steven Rostedt90369902008-11-05 16:05:44 -05002538 r = sprintf(buf, "%u\n", tracer_enabled);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002539 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002540}
2541
2542static ssize_t
2543tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2544 size_t cnt, loff_t *ppos)
2545{
2546 struct trace_array *tr = filp->private_data;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002547 char buf[64];
Hannes Eder5e398412009-02-10 19:44:34 +01002548 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02002549 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002550
Steven Rostedtcffae432008-05-12 21:21:00 +02002551 if (cnt >= sizeof(buf))
2552 return -EINVAL;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002553
2554 if (copy_from_user(&buf, ubuf, cnt))
2555 return -EFAULT;
2556
2557 buf[cnt] = 0;
2558
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02002559 ret = strict_strtoul(buf, 10, &val);
2560 if (ret < 0)
2561 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002562
2563 val = !!val;
2564
2565 mutex_lock(&trace_types_lock);
Steven Rostedt90369902008-11-05 16:05:44 -05002566 if (tracer_enabled ^ val) {
2567 if (val) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002568 tracer_enabled = 1;
Steven Rostedt90369902008-11-05 16:05:44 -05002569 if (current_trace->start)
2570 current_trace->start(tr);
2571 tracing_start();
2572 } else {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002573 tracer_enabled = 0;
Steven Rostedt90369902008-11-05 16:05:44 -05002574 tracing_stop();
2575 if (current_trace->stop)
2576 current_trace->stop(tr);
2577 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002578 }
2579 mutex_unlock(&trace_types_lock);
2580
2581 filp->f_pos += cnt;
2582
2583 return cnt;
2584}
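
/*
 * Usage sketch (paths assume debugfs is mounted at /sys/kernel/debug,
 * as in the mini-HOWTO above):
 *
 *   # echo 0 > /sys/kernel/debug/tracing/tracing_enabled    stop tracing
 *   # echo 1 > /sys/kernel/debug/tracing/tracing_enabled    restart it
 *
 * Any nonzero value behaves like 1, since the input is normalized with
 * "val = !!val" above.
 */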
2585
2586static ssize_t
2587tracing_set_trace_read(struct file *filp, char __user *ubuf,
2588 size_t cnt, loff_t *ppos)
2589{
Li Zefanee6c2c12009-09-18 14:06:47 +08002590 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002591 int r;
2592
2593 mutex_lock(&trace_types_lock);
2594 if (current_trace)
2595 r = sprintf(buf, "%s\n", current_trace->name);
2596 else
2597 r = sprintf(buf, "\n");
2598 mutex_unlock(&trace_types_lock);
2599
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002600 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002601}
2602
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02002603int tracer_init(struct tracer *t, struct trace_array *tr)
2604{
2605 tracing_reset_online_cpus(tr);
2606 return t->init(tr);
2607}
2608
Steven Rostedt73c51622009-03-11 13:42:01 -04002609static int tracing_resize_ring_buffer(unsigned long size)
2610{
2611 int ret;
2612
2613 /*
2614	 * If the kernel or the user changes the size of the ring buffer,
Steven Rostedta123c522009-03-12 11:21:08 -04002615 * we use the size that was given, and we can forget about
2616 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04002617 */
2618 ring_buffer_expanded = 1;
2619
2620 ret = ring_buffer_resize(global_trace.buffer, size);
2621 if (ret < 0)
2622 return ret;
2623
2624 ret = ring_buffer_resize(max_tr.buffer, size);
2625 if (ret < 0) {
2626 int r;
2627
2628 r = ring_buffer_resize(global_trace.buffer,
2629 global_trace.entries);
2630 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04002631 /*
2632	 * We are left with a max buffer
2633	 * of a different size!
2634	 * The max buffer is our "snapshot" buffer.
2635	 * When a tracer needs a snapshot (one of the
2636	 * latency tracers), it swaps the max buffer
2637	 * with the saved snapshot. We succeeded in
2638	 * updating the size of the main buffer, but failed
2639	 * to update the size of the max buffer. And when we
2640	 * tried to reset the main buffer to its original
2641	 * size, we failed there too. This is very unlikely
2642	 * to happen, but if it does, warn and kill all
2643	 * tracing.
2644 */
Steven Rostedt73c51622009-03-11 13:42:01 -04002645 WARN_ON(1);
2646 tracing_disabled = 1;
2647 }
2648 return ret;
2649 }
2650
2651 global_trace.entries = size;
2652
2653 return ret;
2654}
2655
Steven Rostedt1852fcc2009-03-11 14:33:00 -04002656/**
2657 * tracing_update_buffers - used by tracing facility to expand ring buffers
2658 *
2659 * To save memory when tracing is never used on a system that has it
2660 * configured in, the ring buffers are set to a minimum size. But once
2661 * a user starts to use the tracing facility, they need to grow
2662 * to their default size.
2663 *
2664 * This function is to be called when a tracer is about to be used.
2665 */
2666int tracing_update_buffers(void)
2667{
2668 int ret = 0;
2669
Steven Rostedt1027fcb2009-03-12 11:33:20 -04002670 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04002671 if (!ring_buffer_expanded)
2672 ret = tracing_resize_ring_buffer(trace_buf_size);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04002673 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04002674
2675 return ret;
2676}
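
/*
 * Sketch of a (hypothetical) caller: code that is about to enable a
 * tracer would typically do
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *
 * so that the ring buffer is grown from its boot-time minimum to
 * trace_buf_size before the first real events are recorded.
 */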
2677
Steven Rostedt577b7852009-02-26 23:43:05 -05002678struct trace_option_dentry;
2679
2680static struct trace_option_dentry *
2681create_trace_option_files(struct tracer *tracer);
2682
2683static void
2684destroy_trace_option_files(struct trace_option_dentry *topts);
2685
Steven Rostedtb2821ae2009-02-02 21:38:32 -05002686static int tracing_set_tracer(const char *buf)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002687{
Steven Rostedt577b7852009-02-26 23:43:05 -05002688 static struct trace_option_dentry *topts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002689 struct trace_array *tr = &global_trace;
2690 struct tracer *t;
Peter Zijlstrad9e54072008-11-01 19:57:37 +01002691 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002692
Steven Rostedt1027fcb2009-03-12 11:33:20 -04002693 mutex_lock(&trace_types_lock);
2694
Steven Rostedt73c51622009-03-11 13:42:01 -04002695 if (!ring_buffer_expanded) {
2696 ret = tracing_resize_ring_buffer(trace_buf_size);
2697 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01002698 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04002699 ret = 0;
2700 }
2701
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002702 for (t = trace_types; t; t = t->next) {
2703 if (strcmp(t->name, buf) == 0)
2704 break;
2705 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02002706 if (!t) {
2707 ret = -EINVAL;
2708 goto out;
2709 }
2710 if (t == current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002711 goto out;
2712
Steven Rostedt9f029e82008-11-12 15:24:24 -05002713 trace_branch_disable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002714 if (current_trace && current_trace->reset)
2715 current_trace->reset(tr);
2716
Steven Rostedt577b7852009-02-26 23:43:05 -05002717 destroy_trace_option_files(topts);
2718
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002719 current_trace = t;
Steven Rostedt577b7852009-02-26 23:43:05 -05002720
2721 topts = create_trace_option_files(current_trace);
2722
Frederic Weisbecker1c800252008-11-16 05:57:26 +01002723 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02002724 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01002725 if (ret)
2726 goto out;
2727 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002728
Steven Rostedt9f029e82008-11-12 15:24:24 -05002729 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002730 out:
2731 mutex_unlock(&trace_types_lock);
2732
Peter Zijlstrad9e54072008-11-01 19:57:37 +01002733 return ret;
2734}
2735
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002736static ssize_t
2737tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2738 size_t cnt, loff_t *ppos)
2739{
Li Zefanee6c2c12009-09-18 14:06:47 +08002740 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002741 int i;
2742 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01002743 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002744
Steven Rostedt60063a62008-10-28 10:44:24 -04002745 ret = cnt;
2746
Li Zefanee6c2c12009-09-18 14:06:47 +08002747 if (cnt > MAX_TRACER_SIZE)
2748 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002749
2750 if (copy_from_user(&buf, ubuf, cnt))
2751 return -EFAULT;
2752
2753 buf[cnt] = 0;
2754
2755 /* strip ending whitespace. */
2756 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
2757 buf[i] = 0;
2758
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01002759 err = tracing_set_tracer(buf);
2760 if (err)
2761 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002762
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01002763 filp->f_pos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002764
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02002765 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002766}
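
/*
 * Usage sketch: selecting a tracer from user space ("sched_switch" is
 * just the example tracer used in the mini-HOWTO above):
 *
 *   # echo sched_switch > /sys/kernel/debug/tracing/current_tracer
 *
 * The trailing newline that echo appends is stripped by the whitespace
 * loop in tracing_set_trace_write() before the name is matched.
 */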
2767
2768static ssize_t
2769tracing_max_lat_read(struct file *filp, char __user *ubuf,
2770 size_t cnt, loff_t *ppos)
2771{
2772 unsigned long *ptr = filp->private_data;
2773 char buf[64];
2774 int r;
2775
Steven Rostedtcffae432008-05-12 21:21:00 +02002776 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002777 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02002778 if (r > sizeof(buf))
2779 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002780 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002781}
2782
2783static ssize_t
2784tracing_max_lat_write(struct file *filp, const char __user *ubuf,
2785 size_t cnt, loff_t *ppos)
2786{
Hannes Eder5e398412009-02-10 19:44:34 +01002787 unsigned long *ptr = filp->private_data;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002788 char buf[64];
Hannes Eder5e398412009-02-10 19:44:34 +01002789 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02002790 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002791
Steven Rostedtcffae432008-05-12 21:21:00 +02002792 if (cnt >= sizeof(buf))
2793 return -EINVAL;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002794
2795 if (copy_from_user(&buf, ubuf, cnt))
2796 return -EFAULT;
2797
2798 buf[cnt] = 0;
2799
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02002800 ret = strict_strtoul(buf, 10, &val);
2801 if (ret < 0)
2802 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002803
2804 *ptr = val * 1000;
2805
2806 return cnt;
2807}
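
/*
 * Note on units: this file is read and written in microseconds while
 * the backing variable holds nanoseconds, hence nsecs_to_usecs() in the
 * read path and "val * 1000" in the write path. For example:
 *
 *   # echo 100 > /sys/kernel/debug/tracing/tracing_max_latency
 *
 * stores 100000 in the variable.
 */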
2808
Steven Rostedtb3806b42008-05-12 21:20:46 +02002809static int tracing_open_pipe(struct inode *inode, struct file *filp)
2810{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002811 long cpu_file = (long) inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002812 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002813 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002814
2815 if (tracing_disabled)
2816 return -ENODEV;
2817
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002818 mutex_lock(&trace_types_lock);
2819
2820 /* We only allow one reader per cpu */
2821 if (cpu_file == TRACE_PIPE_ALL_CPU) {
2822 if (!cpumask_empty(tracing_reader_cpumask)) {
2823 ret = -EBUSY;
2824 goto out;
2825 }
2826 cpumask_setall(tracing_reader_cpumask);
2827 } else {
2828 if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask))
2829 cpumask_set_cpu(cpu_file, tracing_reader_cpumask);
2830 else {
2831 ret = -EBUSY;
2832 goto out;
2833 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02002834 }
2835
2836 /* create a buffer to store the information to pass to userspace */
2837 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002838 if (!iter) {
2839 ret = -ENOMEM;
2840 goto out;
2841 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02002842
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002843 /*
2844 * We make a copy of the current tracer to avoid concurrent
2845	 * changes to it while we are reading.
2846 */
2847 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
2848 if (!iter->trace) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002849 ret = -ENOMEM;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002850 goto fail;
2851 }
2852 if (current_trace)
2853 *iter->trace = *current_trace;
2854
2855 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
2856 ret = -ENOMEM;
2857 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10302858 }
2859
Steven Rostedta3097202008-11-07 22:36:02 -05002860 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10302861 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05002862
Steven Rostedt112f38a72009-06-01 15:16:05 -04002863 if (trace_flags & TRACE_ITER_LATENCY_FMT)
2864 iter->iter_flags |= TRACE_FILE_LAT_FMT;
2865
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002866 iter->cpu_file = cpu_file;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002867 iter->tr = &global_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002868 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002869 filp->private_data = iter;
2870
Steven Rostedt107bad82008-05-12 21:21:01 +02002871 if (iter->trace->pipe_open)
2872 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02002873
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002874out:
2875 mutex_unlock(&trace_types_lock);
2876 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002877
2878fail:
2879 kfree(iter->trace);
2880 kfree(iter);
2881 mutex_unlock(&trace_types_lock);
2882 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002883}
2884
2885static int tracing_release_pipe(struct inode *inode, struct file *file)
2886{
2887 struct trace_iterator *iter = file->private_data;
2888
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002889 mutex_lock(&trace_types_lock);
2890
2891 if (iter->cpu_file == TRACE_PIPE_ALL_CPU)
2892 cpumask_clear(tracing_reader_cpumask);
2893 else
2894 cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
2895
2896 mutex_unlock(&trace_types_lock);
2897
Rusty Russell44623442009-01-01 10:12:23 +10302898 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002899 mutex_destroy(&iter->mutex);
2900 kfree(iter->trace);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002901 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002902
2903 return 0;
2904}
2905
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02002906static unsigned int
2907tracing_poll_pipe(struct file *filp, poll_table *poll_table)
2908{
2909 struct trace_iterator *iter = filp->private_data;
2910
2911 if (trace_flags & TRACE_ITER_BLOCK) {
2912 /*
2913 * Always select as readable when in blocking mode
2914 */
2915 return POLLIN | POLLRDNORM;
Ingo Molnarafc2abc2008-05-12 21:21:00 +02002916 } else {
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02002917 if (!trace_empty(iter))
2918 return POLLIN | POLLRDNORM;
2919 poll_wait(filp, &trace_wait, poll_table);
2920 if (!trace_empty(iter))
2921 return POLLIN | POLLRDNORM;
2922
2923 return 0;
2924 }
2925}
2926
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01002927
2928void default_wait_pipe(struct trace_iterator *iter)
2929{
2930 DEFINE_WAIT(wait);
2931
2932 prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
2933
2934 if (trace_empty(iter))
2935 schedule();
2936
2937 finish_wait(&trace_wait, &wait);
2938}
2939
2940/*
2941 * This is a makeshift waitqueue.
2942 * A tracer might use this callback in some rare cases:
2943 *
2944 * 1) the current tracer might hold the runqueue lock when it wakes up
2945 * a reader, hence a deadlock (sched, function, and function graph tracers)
2946 * 2) the function tracers trace all functions, and we don't want
2947 * the overhead of calling wake_up and friends
2948 * (and of tracing them too)
2949 *
2950 * Anyway, this really is a very primitive wakeup.
2951 */
2952void poll_wait_pipe(struct trace_iterator *iter)
2953{
2954 set_current_state(TASK_INTERRUPTIBLE);
2955 /* sleep for 100 msecs, and try again. */
2956 schedule_timeout(HZ / 10);
2957}
2958
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02002959/* Must be called with iter->mutex held. */
2960static int tracing_wait_pipe(struct file *filp)
2961{
2962 struct trace_iterator *iter = filp->private_data;
2963
2964 while (trace_empty(iter)) {
2965
2966		if (filp->f_flags & O_NONBLOCK)
2967			return -EAGAIN;
2969
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002970 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02002971
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01002972 iter->trace->wait_pipe(iter);
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02002973
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002974 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02002975
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01002976 if (signal_pending(current))
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02002977 return -EINTR;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02002978
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02002979 /*
2980		 * We block until we have read something and tracing has been
2981		 * disabled; if tracing is disabled but we have never read
2982		 * anything, we keep blocking. This allows a user to cat this
2983		 * file, and then enable tracing. But after we have read
2984		 * something, we give an EOF when tracing is again disabled.
2985 *
2986 * iter->pos will be 0 if we haven't read anything.
2987 */
2988 if (!tracer_enabled && iter->pos)
2989 break;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02002990 }
2991
2992 return 1;
2993}
2994
Steven Rostedtb3806b42008-05-12 21:20:46 +02002995/*
2996 * Consumer reader.
2997 */
2998static ssize_t
2999tracing_read_pipe(struct file *filp, char __user *ubuf,
3000 size_t cnt, loff_t *ppos)
3001{
3002 struct trace_iterator *iter = filp->private_data;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003003 static struct tracer *old_tracer;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02003004 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003005
3006 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02003007 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3008 if (sret != -EBUSY)
3009 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003010
Steven Rostedtf9520752009-03-02 14:04:40 -05003011 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003012
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003013 /* copy the tracer to avoid using a global lock all around */
Steven Rostedt107bad82008-05-12 21:21:01 +02003014 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003015 if (unlikely(old_tracer != current_trace && current_trace)) {
3016 old_tracer = current_trace;
3017 *iter->trace = *current_trace;
3018 }
3019 mutex_unlock(&trace_types_lock);
3020
3021 /*
3022	 * Avoid more than one consumer on a single file descriptor.
3023	 * This is just a matter of trace coherency; the ring buffer itself
3024 * is protected.
3025 */
3026 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02003027 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02003028 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
3029 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02003030 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02003031 }
3032
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02003033waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02003034 sret = tracing_wait_pipe(filp);
3035 if (sret <= 0)
3036 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003037
3038 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02003039 if (trace_empty(iter)) {
3040 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02003041 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02003042 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02003043
3044 if (cnt >= PAGE_SIZE)
3045 cnt = PAGE_SIZE - 1;
3046
Steven Rostedt53d0aa72008-05-12 21:21:01 +02003047 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02003048 memset(&iter->seq, 0,
3049 sizeof(struct trace_iterator) -
3050 offsetof(struct trace_iterator, seq));
Steven Rostedt4823ed72008-05-12 21:21:01 +02003051 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003052
Lai Jiangshan4f535962009-05-18 19:35:34 +08003053 trace_event_read_lock();
Steven Rostedt088b1e422008-05-12 21:20:48 +02003054 while (find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003055 enum print_line_t ret;
Steven Rostedt088b1e422008-05-12 21:20:48 +02003056 int len = iter->seq.len;
3057
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003058 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003059 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02003060 /* don't print partial lines */
3061 iter->seq.len = len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003062 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02003063 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01003064 if (ret != TRACE_TYPE_NO_CONSUME)
3065 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003066
3067 if (iter->seq.len >= cnt)
3068 break;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003069 }
Lai Jiangshan4f535962009-05-18 19:35:34 +08003070 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02003071
Steven Rostedtb3806b42008-05-12 21:20:46 +02003072 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02003073 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3074 if (iter->seq.readpos >= iter->seq.len)
Steven Rostedtf9520752009-03-02 14:04:40 -05003075 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02003076
3077 /*
3078	 * If there was nothing to send to the user, in spite of consuming
3079	 * trace entries, go back to wait for more entries.
3080 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02003081 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02003082 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003083
Steven Rostedt107bad82008-05-12 21:21:01 +02003084out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003085 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02003086
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02003087 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003088}
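
/*
 * Usage sketch: unlike the "trace" file, trace_pipe is a consuming
 * reader (note the trace_consume() call above):
 *
 *   # cat /sys/kernel/debug/tracing/trace_pipe
 *
 * blocks until entries arrive, unless the file was opened O_NONBLOCK,
 * and each entry read is removed from the ring buffer.
 */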
3089
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02003090static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
3091 struct pipe_buffer *buf)
3092{
3093 __free_page(buf->page);
3094}
3095
3096static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
3097 unsigned int idx)
3098{
3099 __free_page(spd->pages[idx]);
3100}
3101
3102static struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05003103 .can_merge = 0,
3104 .map = generic_pipe_buf_map,
3105 .unmap = generic_pipe_buf_unmap,
3106 .confirm = generic_pipe_buf_confirm,
3107 .release = tracing_pipe_buf_release,
3108 .steal = generic_pipe_buf_steal,
3109 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02003110};
3111
Steven Rostedt34cd4992009-02-09 12:06:29 -05003112static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01003113tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05003114{
3115 size_t count;
3116 int ret;
3117
3118 /* Seq buffer is page-sized, exactly what we need. */
3119 for (;;) {
3120 count = iter->seq.len;
3121 ret = print_trace_line(iter);
3122 count = iter->seq.len - count;
3123 if (rem < count) {
3124 rem = 0;
3125 iter->seq.len -= count;
3126 break;
3127 }
3128 if (ret == TRACE_TYPE_PARTIAL_LINE) {
3129 iter->seq.len -= count;
3130 break;
3131 }
3132
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08003133 if (ret != TRACE_TYPE_NO_CONSUME)
3134 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05003135 rem -= count;
3136 if (!find_next_entry_inc(iter)) {
3137 rem = 0;
3138 iter->ent = NULL;
3139 break;
3140 }
3141 }
3142
3143 return rem;
3144}
3145
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02003146static ssize_t tracing_splice_read_pipe(struct file *filp,
3147 loff_t *ppos,
3148 struct pipe_inode_info *pipe,
3149 size_t len,
3150 unsigned int flags)
3151{
3152 struct page *pages[PIPE_BUFFERS];
3153 struct partial_page partial[PIPE_BUFFERS];
3154 struct trace_iterator *iter = filp->private_data;
3155 struct splice_pipe_desc spd = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05003156 .pages = pages,
3157 .partial = partial,
3158 .nr_pages = 0, /* This gets updated below. */
3159 .flags = flags,
3160 .ops = &tracing_pipe_buf_ops,
3161 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02003162 };
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003163 static struct tracer *old_tracer;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02003164 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05003165 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02003166 unsigned int i;
3167
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003168 /* copy the tracer to avoid using a global lock all around */
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02003169 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003170 if (unlikely(old_tracer != current_trace && current_trace)) {
3171 old_tracer = current_trace;
3172 *iter->trace = *current_trace;
3173 }
3174 mutex_unlock(&trace_types_lock);
3175
3176 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02003177
3178 if (iter->trace->splice_read) {
3179 ret = iter->trace->splice_read(iter, filp,
3180 ppos, pipe, len, flags);
3181 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05003182 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02003183 }
3184
3185 ret = tracing_wait_pipe(filp);
3186 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05003187 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02003188
3189 if (!iter->ent && !find_next_entry_inc(iter)) {
3190 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05003191 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02003192 }
3193
Lai Jiangshan4f535962009-05-18 19:35:34 +08003194 trace_event_read_lock();
3195
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02003196 /* Fill as many pages as possible. */
3197 for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
3198 pages[i] = alloc_page(GFP_KERNEL);
Steven Rostedt34cd4992009-02-09 12:06:29 -05003199 if (!pages[i])
3200 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02003201
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01003202 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02003203
3204 /* Copy the data into the page, so we can start over. */
3205 ret = trace_seq_to_buffer(&iter->seq,
3206 page_address(pages[i]),
3207 iter->seq.len);
3208 if (ret < 0) {
3209 __free_page(pages[i]);
3210 break;
3211 }
3212 partial[i].offset = 0;
3213 partial[i].len = iter->seq.len;
3214
Steven Rostedtf9520752009-03-02 14:04:40 -05003215 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02003216 }
3217
Lai Jiangshan4f535962009-05-18 19:35:34 +08003218 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003219 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02003220
3221 spd.nr_pages = i;
3222
3223 return splice_to_pipe(pipe, &spd);
3224
Steven Rostedt34cd4992009-02-09 12:06:29 -05003225out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003226 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02003227
3228 return ret;
3229}
3230
Steven Rostedta98a3c32008-05-12 21:20:59 +02003231static ssize_t
3232tracing_entries_read(struct file *filp, char __user *ubuf,
3233 size_t cnt, loff_t *ppos)
3234{
3235 struct trace_array *tr = filp->private_data;
Steven Rostedtdb526ca2009-03-12 13:53:25 -04003236 char buf[96];
Steven Rostedta98a3c32008-05-12 21:20:59 +02003237 int r;
3238
Steven Rostedtdb526ca2009-03-12 13:53:25 -04003239 mutex_lock(&trace_types_lock);
3240 if (!ring_buffer_expanded)
3241 r = sprintf(buf, "%lu (expanded: %lu)\n",
3242 tr->entries >> 10,
3243 trace_buf_size >> 10);
3244 else
3245 r = sprintf(buf, "%lu\n", tr->entries >> 10);
3246 mutex_unlock(&trace_types_lock);
3247
Steven Rostedta98a3c32008-05-12 21:20:59 +02003248 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3249}
3250
3251static ssize_t
3252tracing_entries_write(struct file *filp, const char __user *ubuf,
3253 size_t cnt, loff_t *ppos)
3254{
3255 unsigned long val;
3256 char buf[64];
Steven Rostedtbf5e6512008-11-10 21:46:00 -05003257 int ret, cpu;
Steven Rostedta98a3c32008-05-12 21:20:59 +02003258
Steven Rostedtcffae432008-05-12 21:21:00 +02003259 if (cnt >= sizeof(buf))
3260 return -EINVAL;
Steven Rostedta98a3c32008-05-12 21:20:59 +02003261
3262 if (copy_from_user(&buf, ubuf, cnt))
3263 return -EFAULT;
3264
3265 buf[cnt] = 0;
3266
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02003267 ret = strict_strtoul(buf, 10, &val);
3268 if (ret < 0)
3269 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02003270
3271 /* must have at least 1 entry */
3272 if (!val)
3273 return -EINVAL;
3274
3275 mutex_lock(&trace_types_lock);
3276
Steven Rostedtc76f0692008-11-07 22:36:02 -05003277 tracing_stop();
Steven Rostedta98a3c32008-05-12 21:20:59 +02003278
Steven Rostedtbf5e6512008-11-10 21:46:00 -05003279 /* disable all cpu buffers */
3280 for_each_tracing_cpu(cpu) {
3281 if (global_trace.data[cpu])
3282 atomic_inc(&global_trace.data[cpu]->disabled);
3283 if (max_tr.data[cpu])
3284 atomic_inc(&max_tr.data[cpu]->disabled);
3285 }
3286
Steven Rostedt1696b2b2008-11-13 00:09:35 -05003287 /* value is in KB */
3288 val <<= 10;
3289
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003290 if (val != global_trace.entries) {
Steven Rostedt73c51622009-03-11 13:42:01 -04003291 ret = tracing_resize_ring_buffer(val);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003292 if (ret < 0) {
3293 cnt = ret;
Steven Rostedt3eefae92008-05-12 21:21:04 +02003294 goto out;
3295 }
Steven Rostedta98a3c32008-05-12 21:20:59 +02003296 }
3297
3298 filp->f_pos += cnt;
3299
Steven Rostedt19384c02008-05-22 00:22:16 -04003300 /* If check pages failed, return ENOMEM */
3301 if (tracing_disabled)
3302 cnt = -ENOMEM;
Steven Rostedta98a3c32008-05-12 21:20:59 +02003303 out:
Steven Rostedtbf5e6512008-11-10 21:46:00 -05003304 for_each_tracing_cpu(cpu) {
3305 if (global_trace.data[cpu])
3306 atomic_dec(&global_trace.data[cpu]->disabled);
3307 if (max_tr.data[cpu])
3308 atomic_dec(&max_tr.data[cpu]->disabled);
3309 }
3310
Steven Rostedtc76f0692008-11-07 22:36:02 -05003311 tracing_start();
Steven Rostedta98a3c32008-05-12 21:20:59 +02003312 max_tr.entries = global_trace.entries;
3313 mutex_unlock(&trace_types_lock);
3314
3315 return cnt;
3316}
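
/*
 * Usage sketch: buffer_size_kb takes the new ring buffer size in
 * kilobytes (note the "val <<= 10" above), e.g.:
 *
 *   # echo 1408 > /sys/kernel/debug/tracing/buffer_size_kb
 *   # cat /sys/kernel/debug/tracing/buffer_size_kb
 *   1408
 *
 * (hypothetical values; a write of 0 is rejected with -EINVAL).
 */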
3317
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03003318static int mark_printk(const char *fmt, ...)
3319{
3320 int ret;
3321 va_list args;
3322 va_start(args, fmt);
Steven Rostedt40ce74f2009-03-19 14:03:53 -04003323 ret = trace_vprintk(0, fmt, args);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03003324 va_end(args);
3325 return ret;
3326}
3327
3328static ssize_t
3329tracing_mark_write(struct file *filp, const char __user *ubuf,
3330 size_t cnt, loff_t *fpos)
3331{
3332 char *buf;
3333 char *end;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03003334
Steven Rostedtc76f0692008-11-07 22:36:02 -05003335 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03003336 return -EINVAL;
3337
3338 if (cnt > TRACE_BUF_SIZE)
3339 cnt = TRACE_BUF_SIZE;
3340
3341 buf = kmalloc(cnt + 1, GFP_KERNEL);
3342 if (buf == NULL)
3343 return -ENOMEM;
3344
3345 if (copy_from_user(buf, ubuf, cnt)) {
3346 kfree(buf);
3347 return -EFAULT;
3348 }
3349
3350	/* Cut at the first NUL or newline. */
3351 buf[cnt] = '\0';
3352 end = strchr(buf, '\n');
3353 if (end)
3354 *end = '\0';
3355
3356 cnt = mark_printk("%s\n", buf);
3357 kfree(buf);
3358 *fpos += cnt;
3359
3360 return cnt;
3361}
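
/*
 * Usage sketch: user space can annotate the trace through trace_marker;
 * the write is cut at the first NUL or newline (see above):
 *
 *   # echo "hit the slow path" > /sys/kernel/debug/tracing/trace_marker
 */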
3362
Zhaolei5079f322009-08-25 16:12:56 +08003363static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf,
3364 size_t cnt, loff_t *ppos)
3365{
3366 char buf[64];
3367 int bufiter = 0;
3368 int i;
3369
3370 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
3371 bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter,
3372 "%s%s%s%s", i ? " " : "",
3373 i == trace_clock_id ? "[" : "", trace_clocks[i].name,
3374 i == trace_clock_id ? "]" : "");
3375 bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n");
3376
3377 return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter);
3378}
3379
3380static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
3381 size_t cnt, loff_t *fpos)
3382{
3383 char buf[64];
3384 const char *clockstr;
3385 int i;
3386
3387 if (cnt >= sizeof(buf))
3388 return -EINVAL;
3389
3390 if (copy_from_user(&buf, ubuf, cnt))
3391 return -EFAULT;
3392
3393 buf[cnt] = 0;
3394
3395 clockstr = strstrip(buf);
3396
3397 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
3398 if (strcmp(trace_clocks[i].name, clockstr) == 0)
3399 break;
3400 }
3401 if (i == ARRAY_SIZE(trace_clocks))
3402 return -EINVAL;
3403
3404 trace_clock_id = i;
3405
3406 mutex_lock(&trace_types_lock);
3407
3408 ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
3409 if (max_tr.buffer)
3410 ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
3411
3412 mutex_unlock(&trace_types_lock);
3413
3414 *fpos += cnt;
3415
3416 return cnt;
3417}
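
/*
 * Usage sketch: reading trace_clock lists the available clocks with the
 * current one in brackets; writing a name switches both the main and
 * the max buffers to that clock. Assuming "local" and "global" are the
 * registered trace_clocks[] entries:
 *
 *   # cat /sys/kernel/debug/tracing/trace_clock
 *   [local] global
 *   # echo global > /sys/kernel/debug/tracing/trace_clock
 */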
3418
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003419static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003420 .open = tracing_open_generic,
3421 .read = tracing_max_lat_read,
3422 .write = tracing_max_lat_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003423};
3424
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003425static const struct file_operations tracing_ctrl_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003426 .open = tracing_open_generic,
3427 .read = tracing_ctrl_read,
3428 .write = tracing_ctrl_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003429};
3430
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003431static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003432 .open = tracing_open_generic,
3433 .read = tracing_set_trace_read,
3434 .write = tracing_set_trace_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003435};
3436
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003437static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003438 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02003439 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003440 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02003441 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003442 .release = tracing_release_pipe,
Steven Rostedtb3806b42008-05-12 21:20:46 +02003443};
3444
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003445static const struct file_operations tracing_entries_fops = {
Steven Rostedta98a3c32008-05-12 21:20:59 +02003446 .open = tracing_open_generic,
3447 .read = tracing_entries_read,
3448 .write = tracing_entries_write,
3449};
3450
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003451static const struct file_operations tracing_mark_fops = {
Frédéric Weisbecker43a15382008-09-21 20:16:30 +02003452 .open = tracing_open_generic,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03003453 .write = tracing_mark_write,
3454};
3455
Zhaolei5079f322009-08-25 16:12:56 +08003456static const struct file_operations trace_clock_fops = {
3457 .open = tracing_open_generic,
3458 .read = tracing_clock_read,
3459 .write = tracing_clock_write,
3460};
3461
Steven Rostedt2cadf912008-12-01 22:20:19 -05003462struct ftrace_buffer_info {
3463 struct trace_array *tr;
3464 void *spare;
3465 int cpu;
3466 unsigned int read;
3467};
3468
3469static int tracing_buffers_open(struct inode *inode, struct file *filp)
3470{
3471 int cpu = (int)(long)inode->i_private;
3472 struct ftrace_buffer_info *info;
3473
3474 if (tracing_disabled)
3475 return -ENODEV;
3476
3477 info = kzalloc(sizeof(*info), GFP_KERNEL);
3478 if (!info)
3479 return -ENOMEM;
3480
3481 info->tr = &global_trace;
3482 info->cpu = cpu;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08003483 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05003484 /* Force reading ring buffer for first read */
3485 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05003486
3487 filp->private_data = info;
3488
Lai Jiangshand1e7e022009-04-02 15:16:56 +08003489 return nonseekable_open(inode, filp);
Steven Rostedt2cadf912008-12-01 22:20:19 -05003490}
3491
3492static ssize_t
3493tracing_buffers_read(struct file *filp, char __user *ubuf,
3494 size_t count, loff_t *ppos)
3495{
3496 struct ftrace_buffer_info *info = filp->private_data;
3497 unsigned int pos;
3498 ssize_t ret;
3499 size_t size;
3500
Steven Rostedt2dc5d122009-03-04 19:10:05 -05003501 if (!count)
3502 return 0;
3503
Lai Jiangshanddd538f2009-04-02 15:16:59 +08003504 if (!info->spare)
3505 info->spare = ring_buffer_alloc_read_page(info->tr->buffer);
3506 if (!info->spare)
3507 return -ENOMEM;
3508
Steven Rostedt2cadf912008-12-01 22:20:19 -05003509 /* Do we have previous read data to read? */
3510 if (info->read < PAGE_SIZE)
3511 goto read;
3512
3513 info->read = 0;
3514
3515 ret = ring_buffer_read_page(info->tr->buffer,
3516 &info->spare,
3517 count,
3518 info->cpu, 0);
3519 if (ret < 0)
3520 return 0;
3521
3522 pos = ring_buffer_page_len(info->spare);
3523
3524 if (pos < PAGE_SIZE)
3525 memset(info->spare + pos, 0, PAGE_SIZE - pos);
3526
3527read:
3528 size = PAGE_SIZE - info->read;
3529 if (size > count)
3530 size = count;
3531
3532 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt2dc5d122009-03-04 19:10:05 -05003533 if (ret == size)
Steven Rostedt2cadf912008-12-01 22:20:19 -05003534 return -EFAULT;
Steven Rostedt2dc5d122009-03-04 19:10:05 -05003535 size -= ret;
3536
Steven Rostedt2cadf912008-12-01 22:20:19 -05003537 *ppos += size;
3538 info->read += size;
3539
3540 return size;
3541}
3542
3543static int tracing_buffers_release(struct inode *inode, struct file *file)
3544{
3545 struct ftrace_buffer_info *info = file->private_data;
3546
Lai Jiangshanddd538f2009-04-02 15:16:59 +08003547 if (info->spare)
3548 ring_buffer_free_read_page(info->tr->buffer, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05003549 kfree(info);
3550
3551 return 0;
3552}
3553
3554struct buffer_ref {
3555 struct ring_buffer *buffer;
3556 void *page;
3557 int ref;
3558};
3559
3560static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
3561 struct pipe_buffer *buf)
3562{
3563 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
3564
3565 if (--ref->ref)
3566 return;
3567
3568 ring_buffer_free_read_page(ref->buffer, ref->page);
3569 kfree(ref);
3570 buf->private = 0;
3571}
3572
3573static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe,
3574 struct pipe_buffer *buf)
3575{
3576 return 1;
3577}
3578
3579static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
3580 struct pipe_buffer *buf)
3581{
3582 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
3583
3584 ref->ref++;
3585}
3586
3587/* Pipe buffer operations for a buffer. */
3588static struct pipe_buf_operations buffer_pipe_buf_ops = {
3589 .can_merge = 0,
3590 .map = generic_pipe_buf_map,
3591 .unmap = generic_pipe_buf_unmap,
3592 .confirm = generic_pipe_buf_confirm,
3593 .release = buffer_pipe_buf_release,
3594 .steal = buffer_pipe_buf_steal,
3595 .get = buffer_pipe_buf_get,
3596};
3597
3598/*
3599 * Callback from splice_to_pipe(), if we need to release some pages
3600 * at the end of the spd in case we errored out in filling the pipe.
3601 */
3602static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
3603{
3604 struct buffer_ref *ref =
3605 (struct buffer_ref *)spd->partial[i].private;
3606
3607 if (--ref->ref)
3608 return;
3609
3610 ring_buffer_free_read_page(ref->buffer, ref->page);
3611 kfree(ref);
3612 spd->partial[i].private = 0;
3613}
3614
3615static ssize_t
3616tracing_buffers_splice_read(struct file *file, loff_t *ppos,
3617 struct pipe_inode_info *pipe, size_t len,
3618 unsigned int flags)
3619{
3620 struct ftrace_buffer_info *info = file->private_data;
3621 struct partial_page partial[PIPE_BUFFERS];
3622 struct page *pages[PIPE_BUFFERS];
3623 struct splice_pipe_desc spd = {
3624 .pages = pages,
3625 .partial = partial,
3626 .flags = flags,
3627 .ops = &buffer_pipe_buf_ops,
3628 .spd_release = buffer_spd_release,
3629 };
3630 struct buffer_ref *ref;
Steven Rostedt93459c62009-04-29 00:23:13 -04003631 int entries, size, i;
Steven Rostedt2cadf912008-12-01 22:20:19 -05003632 size_t ret;
3633
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08003634 if (*ppos & (PAGE_SIZE - 1)) {
3635 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
3636 return -EINVAL;
3637 }
3638
3639 if (len & (PAGE_SIZE - 1)) {
3640 WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
3641 if (len < PAGE_SIZE)
3642 return -EINVAL;
3643 len &= PAGE_MASK;
3644 }
3645
Steven Rostedt93459c62009-04-29 00:23:13 -04003646 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
3647
3648 for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05003649 struct page *page;
3650 int r;
3651
3652 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
3653 if (!ref)
3654 break;
3655
Steven Rostedt7267fa62009-04-29 00:16:21 -04003656 ref->ref = 1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05003657 ref->buffer = info->tr->buffer;
3658 ref->page = ring_buffer_alloc_read_page(ref->buffer);
3659 if (!ref->page) {
3660 kfree(ref);
3661 break;
3662 }
3663
3664 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtf2957f12009-04-29 00:26:30 -04003665 len, info->cpu, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05003666 if (r < 0) {
3667 ring_buffer_free_read_page(ref->buffer,
3668 ref->page);
3669 kfree(ref);
3670 break;
3671 }
3672
3673 /*
3674		 * Zero out any leftover data; this is going
3675		 * to user land.
3676 */
3677 size = ring_buffer_page_len(ref->page);
3678 if (size < PAGE_SIZE)
3679 memset(ref->page + size, 0, PAGE_SIZE - size);
3680
3681 page = virt_to_page(ref->page);
3682
3683 spd.pages[i] = page;
3684 spd.partial[i].len = PAGE_SIZE;
3685 spd.partial[i].offset = 0;
3686 spd.partial[i].private = (unsigned long)ref;
3687 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08003688 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04003689
3690 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
Steven Rostedt2cadf912008-12-01 22:20:19 -05003691 }
3692
3693 spd.nr_pages = i;
3694
3695 /* did we read anything? */
3696 if (!spd.nr_pages) {
3697 if (flags & SPLICE_F_NONBLOCK)
3698 ret = -EAGAIN;
3699 else
3700 ret = 0;
3701 /* TODO: block */
3702 return ret;
3703 }
3704
3705 ret = splice_to_pipe(pipe, &spd);
3706
3707 return ret;
3708}
3709
3710static const struct file_operations tracing_buffers_fops = {
3711 .open = tracing_buffers_open,
3712 .read = tracing_buffers_read,
3713 .release = tracing_buffers_release,
3714 .splice_read = tracing_buffers_splice_read,
3715 .llseek = no_llseek,
3716};
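
/*
 * Usage sketch: trace_pipe_raw exposes raw ring-buffer pages for one
 * cpu, and splice_read is the intended fast path. A (hypothetical)
 * reader could do:
 *
 *	fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		  O_RDONLY);
 *	splice(fd, NULL, pipefd[1], NULL, 4096, 0);
 *
 * The read offset must be page-aligned, and the length is truncated to
 * a page multiple (reads shorter than a page are rejected); see the
 * WARN_ONCE() checks in tracing_buffers_splice_read().
 */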
3717
Steven Rostedtc8d77182009-04-29 18:03:45 -04003718static ssize_t
3719tracing_stats_read(struct file *filp, char __user *ubuf,
3720 size_t count, loff_t *ppos)
3721{
3722 unsigned long cpu = (unsigned long)filp->private_data;
3723 struct trace_array *tr = &global_trace;
3724 struct trace_seq *s;
3725 unsigned long cnt;
3726
Li Zefane4f2d102009-06-15 10:57:28 +08003727 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04003728 if (!s)
3729		return -ENOMEM;
3730
3731 trace_seq_init(s);
3732
3733 cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
3734 trace_seq_printf(s, "entries: %ld\n", cnt);
3735
3736 cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
3737 trace_seq_printf(s, "overrun: %ld\n", cnt);
3738
3739 cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
3740 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
3741
Steven Rostedtc8d77182009-04-29 18:03:45 -04003742 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
3743
3744 kfree(s);
3745
3746 return count;
3747}
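
/*
 * Example output of a per_cpu/cpuN/stats file (hypothetical counts),
 * matching the three trace_seq_printf() calls above:
 *
 *   entries: 129
 *   overrun: 0
 *   commit overrun: 0
 */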
3748
3749static const struct file_operations tracing_stats_fops = {
3750 .open = tracing_open_generic,
3751 .read = tracing_stats_read,
3752};
3753
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003754#ifdef CONFIG_DYNAMIC_FTRACE
3755
Steven Rostedtb807c3d2008-10-30 16:08:33 -04003756int __weak ftrace_arch_read_dyn_info(char *buf, int size)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003757{
Steven Rostedtb807c3d2008-10-30 16:08:33 -04003758 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003759}
3760
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003761static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04003762tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003763 size_t cnt, loff_t *ppos)
3764{
Steven Rostedta26a2a22008-10-31 00:03:22 -04003765 static char ftrace_dyn_info_buffer[1024];
3766 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003767 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04003768 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04003769 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003770 int r;
3771
Steven Rostedtb807c3d2008-10-30 16:08:33 -04003772 mutex_lock(&dyn_info_mutex);
3773 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003774
Steven Rostedta26a2a22008-10-31 00:03:22 -04003775 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04003776 buf[r++] = '\n';
3777
3778 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3779
3780 mutex_unlock(&dyn_info_mutex);
3781
3782 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003783}
3784
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003785static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003786 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04003787 .read = tracing_read_dyn_info,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003788};
3789#endif
3790
3791static struct dentry *d_tracer;
3792
3793struct dentry *tracing_init_dentry(void)
3794{
3795 static int once;
3796
3797 if (d_tracer)
3798 return d_tracer;
3799
Frederic Weisbecker3e1f60b2009-03-22 23:10:45 +01003800 if (!debugfs_initialized())
3801 return NULL;
3802
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003803 d_tracer = debugfs_create_dir("tracing", NULL);
3804
3805 if (!d_tracer && !once) {
3806 once = 1;
3807 pr_warning("Could not create debugfs directory 'tracing'\n");
3808 return NULL;
3809 }
3810
3811 return d_tracer;
3812}
3813
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003814static struct dentry *d_percpu;
3815
3816struct dentry *tracing_dentry_percpu(void)
3817{
3818 static int once;
3819 struct dentry *d_tracer;
3820
3821 if (d_percpu)
3822 return d_percpu;
3823
3824 d_tracer = tracing_init_dentry();
3825
3826 if (!d_tracer)
3827 return NULL;
3828
3829 d_percpu = debugfs_create_dir("per_cpu", d_tracer);
3830
3831 if (!d_percpu && !once) {
3832 once = 1;
3833 pr_warning("Could not create debugfs directory 'per_cpu'\n");
3834 return NULL;
3835 }
3836
3837 return d_percpu;
3838}
3839
3840static void tracing_init_debugfs_percpu(long cpu)
3841{
3842 struct dentry *d_percpu = tracing_dentry_percpu();
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003843 struct dentry *d_cpu;
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01003844	/* strlen("cpu") + up to 3 digits for the cpu number + '\0' */
3845 char cpu_dir[7];
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003846
3847 if (cpu > 999 || cpu < 0)
3848 return;
3849
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01003850 sprintf(cpu_dir, "cpu%ld", cpu);
3851 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
3852 if (!d_cpu) {
3853 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
3854 return;
3855 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003856
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01003857 /* per cpu trace_pipe */
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003858 trace_create_file("trace_pipe", 0444, d_cpu,
3859 (void *) cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003860
3861 /* per cpu trace */
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003862 trace_create_file("trace", 0644, d_cpu,
3863 (void *) cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04003864
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003865 trace_create_file("trace_pipe_raw", 0444, d_cpu,
3866 (void *) cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04003867
3868 trace_create_file("stats", 0444, d_cpu,
3869 (void *) cpu, &tracing_stats_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003870}
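
/*
 * Resulting layout for cpu 0, given the files created above:
 *
 *   /sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe
 *   /sys/kernel/debug/tracing/per_cpu/cpu0/trace
 *   /sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw
 *   /sys/kernel/debug/tracing/per_cpu/cpu0/stats
 */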
3871
Steven Rostedt60a11772008-05-12 21:20:44 +02003872#ifdef CONFIG_FTRACE_SELFTEST
3873/* Let selftest have access to static functions in this file */
3874#include "trace_selftest.c"
3875#endif
3876
Steven Rostedt577b7852009-02-26 23:43:05 -05003877struct trace_option_dentry {
3878 struct tracer_opt *opt;
3879 struct tracer_flags *flags;
3880 struct dentry *entry;
3881};
3882
3883static ssize_t
3884trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
3885 loff_t *ppos)
3886{
3887 struct trace_option_dentry *topt = filp->private_data;
3888 char *buf;
3889
3890 if (topt->flags->val & topt->opt->bit)
3891 buf = "1\n";
3892 else
3893 buf = "0\n";
3894
3895 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
3896}
3897
3898static ssize_t
3899trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
3900 loff_t *ppos)
3901{
3902 struct trace_option_dentry *topt = filp->private_data;
3903 unsigned long val;
3904 char buf[64];
3905 int ret;
3906
3907 if (cnt >= sizeof(buf))
3908 return -EINVAL;
3909
3910 if (copy_from_user(&buf, ubuf, cnt))
3911 return -EFAULT;
3912
3913 buf[cnt] = 0;
3914
3915 ret = strict_strtoul(buf, 10, &val);
3916 if (ret < 0)
3917 return ret;
3918
3919 ret = 0;
3920 switch (val) {
3921 case 0:
3922 /* do nothing if already cleared */
3923 if (!(topt->flags->val & topt->opt->bit))
3924 break;
3925
3926 mutex_lock(&trace_types_lock);
3927 if (current_trace->set_flag)
3928 ret = current_trace->set_flag(topt->flags->val,
3929 topt->opt->bit, 0);
3930 mutex_unlock(&trace_types_lock);
3931 if (ret)
3932 return ret;
3933 topt->flags->val &= ~topt->opt->bit;
3934 break;
3935 case 1:
3936 /* do nothing if already set */
3937 if (topt->flags->val & topt->opt->bit)
3938 break;
3939
3940 mutex_lock(&trace_types_lock);
3941 if (current_trace->set_flag)
3942 ret = current_trace->set_flag(topt->flags->val,
3943 topt->opt->bit, 1);
3944 mutex_unlock(&trace_types_lock);
3945 if (ret)
3946 return ret;
3947 topt->flags->val |= topt->opt->bit;
3948 break;
3949
3950 default:
3951 return -EINVAL;
3952 }
3953
3954 *ppos += cnt;
3955
3956 return cnt;
3957}
3958
3959
3960static const struct file_operations trace_options_fops = {
3961 .open = tracing_open_generic,
3962 .read = trace_options_read,
3963 .write = trace_options_write,
3964};
3965
Steven Rostedta8259072009-02-26 22:19:12 -05003966static ssize_t
3967trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
3968 loff_t *ppos)
3969{
3970 long index = (long)filp->private_data;
3971 char *buf;
3972
3973 if (trace_flags & (1 << index))
3974 buf = "1\n";
3975 else
3976 buf = "0\n";
3977
3978 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
3979}
3980
3981static ssize_t
3982trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
3983 loff_t *ppos)
3984{
3985 long index = (long)filp->private_data;
3986 char buf[64];
3987 unsigned long val;
3988 int ret;
3989
3990 if (cnt >= sizeof(buf))
3991 return -EINVAL;
3992
3993 if (copy_from_user(&buf, ubuf, cnt))
3994 return -EFAULT;
3995
3996 buf[cnt] = 0;
3997
3998 ret = strict_strtoul(buf, 10, &val);
3999 if (ret < 0)
4000 return ret;
4001
Zhaoleif2d84b62009-08-07 18:55:48 +08004002 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05004003 return -EINVAL;
Zhaoleif2d84b62009-08-07 18:55:48 +08004004 set_tracer_flags(1 << index, val);
Steven Rostedta8259072009-02-26 22:19:12 -05004005
4006 *ppos += cnt;
4007
4008 return cnt;
4009}
4010
Steven Rostedta8259072009-02-26 22:19:12 -05004011static const struct file_operations trace_options_core_fops = {
4012 .open = tracing_open_generic,
4013 .read = trace_options_core_read,
4014 .write = trace_options_core_write,
4015};
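
/*
 * Usage sketch: each core trace option gets its own 0/1 file under
 * options/ (created by create_trace_options_dir() below), e.g. for the
 * print-parent option shown in the mini-HOWTO:
 *
 *   # echo 1 > /sys/kernel/debug/tracing/options/print-parent
 *
 * Writes of anything other than 0 or 1 are rejected with -EINVAL.
 */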
4016
Frederic Weisbecker5452af62009-03-27 00:25:38 +01004017struct dentry *trace_create_file(const char *name,
4018 mode_t mode,
4019 struct dentry *parent,
4020 void *data,
4021 const struct file_operations *fops)
4022{
4023 struct dentry *ret;
4024
4025 ret = debugfs_create_file(name, mode, parent, data, fops);
4026 if (!ret)
4027 pr_warning("Could not create debugfs '%s' entry\n", name);
4028
4029 return ret;
4030}
4031
4032
Steven Rostedta8259072009-02-26 22:19:12 -05004033static struct dentry *trace_options_init_dentry(void)
4034{
4035 struct dentry *d_tracer;
4036 static struct dentry *t_options;
4037
4038 if (t_options)
4039 return t_options;
4040
4041 d_tracer = tracing_init_dentry();
4042 if (!d_tracer)
4043 return NULL;
4044
4045 t_options = debugfs_create_dir("options", d_tracer);
4046 if (!t_options) {
4047 pr_warning("Could not create debugfs directory 'options'\n");
4048 return NULL;
4049 }
4050
4051 return t_options;
4052}
4053
Steven Rostedt577b7852009-02-26 23:43:05 -05004054static void
4055create_trace_option_file(struct trace_option_dentry *topt,
4056 struct tracer_flags *flags,
4057 struct tracer_opt *opt)
4058{
4059 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05004060
4061 t_options = trace_options_init_dentry();
4062 if (!t_options)
4063 return;
4064
4065 topt->flags = flags;
4066 topt->opt = opt;
4067
Frederic Weisbecker5452af62009-03-27 00:25:38 +01004068 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05004069 &trace_options_fops);
4070
Steven Rostedt577b7852009-02-26 23:43:05 -05004071}
4072
4073static struct trace_option_dentry *
4074create_trace_option_files(struct tracer *tracer)
4075{
4076 struct trace_option_dentry *topts;
4077 struct tracer_flags *flags;
4078 struct tracer_opt *opts;
4079 int cnt;
4080
4081 if (!tracer)
4082 return NULL;
4083
4084 flags = tracer->flags;
4085
4086 if (!flags || !flags->opts)
4087 return NULL;
4088
4089 opts = flags->opts;
4090
4091 for (cnt = 0; opts[cnt].name; cnt++)
4092 ;
4093
Steven Rostedt0cfe8242009-02-27 10:51:10 -05004094 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05004095 if (!topts)
4096 return NULL;
4097
4098 for (cnt = 0; opts[cnt].name; cnt++)
4099 create_trace_option_file(&topts[cnt], flags,
4100 &opts[cnt]);
4101
4102 return topts;
4103}
4104
4105static void
4106destroy_trace_option_files(struct trace_option_dentry *topts)
4107{
4108 int cnt;
4109
4110 if (!topts)
4111 return;
4112
4113 for (cnt = 0; topts[cnt].opt; cnt++) {
4114 if (topts[cnt].entry)
4115 debugfs_remove(topts[cnt].entry);
4116 }
4117
4118 kfree(topts);
4119}
4120
Steven Rostedta8259072009-02-26 22:19:12 -05004121static struct dentry *
4122create_trace_option_core_file(const char *option, long index)
4123{
4124 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05004125
4126 t_options = trace_options_init_dentry();
4127 if (!t_options)
4128 return NULL;
4129
Frederic Weisbecker5452af62009-03-27 00:25:38 +01004130 return trace_create_file(option, 0644, t_options, (void *)index,
Steven Rostedta8259072009-02-26 22:19:12 -05004131 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05004132}
4133
static __init void create_trace_options_dir(void)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry();
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(trace_options[i], i);
}

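/*
 * Create the top-level tracing files.  Assuming debugfs is mounted at
 * /sys/kernel/debug, a typical session built on the files created
 * below looks like:
 *
 *	# echo function > /sys/kernel/debug/tracing/current_tracer
 *	# echo 1 > /sys/kernel/debug/tracing/tracing_enabled
 *	# cat /sys/kernel/debug/tracing/trace_pipe
 *
 * Each trace_create_file() call wires one file name to the
 * file_operations that implement it.
 */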
static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;
	int cpu;

	d_tracer = tracing_init_dentry();

	trace_create_file("tracing_enabled", 0644, d_tracer,
			&global_trace, &tracing_ctrl_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			NULL, &tracing_iter_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			NULL, &tracing_cpumask_fops);

	trace_create_file("trace", 0644, d_tracer,
			(void *) TRACE_PIPE_ALL_CPU, &tracing_fops);

	trace_create_file("available_tracers", 0444, d_tracer,
			&global_trace, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			&global_trace, &set_tracer_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tracing_max_latency, &tracing_max_lat_fops);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&tracing_thresh, &tracing_max_lat_fops);
#endif

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			(void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			&global_trace, &tracing_entries_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			NULL, &tracing_mark_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("trace_clock", 0644, d_tracer, NULL,
			&trace_clock_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif
#ifdef CONFIG_SYSPROF_TRACER
	init_tracer_sysprof_debugfs(d_tracer);
#endif

	create_trace_options_dir();

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(cpu);

	return 0;
}

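/*
 * When the machine oopses or panics, dump the ring buffer to the
 * console if the user asked for it.  ftrace_dump_on_oops is set with
 * the "ftrace_dump_on_oops" boot parameter, or (on kernels providing
 * the sysctl) at run time:
 *
 *	# echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */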
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump();
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is limited to a buffer of 1024 bytes; we really don't need
 * it that big here.  Nothing should be printing 1000 characters per
 * line anyway.
 */
#define TRACE_MAX_PRINT 1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE KERN_EMERG

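/*
 * Print one rendered trace line to the console and reset the seq
 * buffer for the next entry.  Output is clamped to TRACE_MAX_PRINT
 * characters per line.
 */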
static void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* should be NUL terminated already, but we are paranoid */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

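/*
 * Dump the whole ring buffer through printk.  When disable_tracing is
 * true (the panic/oops path) ftrace is killed for good; otherwise
 * tracing is re-enabled once the dump completes.  Serialized so that
 * only the first caller actually dumps.
 */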
static void __ftrace_dump(bool disable_tracing)
{
	static raw_spinlock_t ftrace_dump_lock =
		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	unsigned int old_userobj;
	static int dump_ran;
	unsigned long flags;
	int cnt = 0, cpu;

	/* only one dump */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_dump_lock);
	if (dump_ran)
		goto out;

	dump_ran = 1;

	tracing_off();

	if (disable_tracing)
		ftrace_kill();

	for_each_tracing_cpu(cpu) {
		atomic_inc(&global_trace.data[cpu]->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Simulate the iterator */
	iter.tr = &global_trace;
	iter.trace = current_trace;
	iter.cpu_file = TRACE_PIPE_ALL_CPU;

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer.  This is a bit expensive, but it
	 * is not done often.  We read everything we can,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

	/* Re-enable tracing if requested */
	if (!disable_tracing) {
		trace_flags |= old_userobj;

		for_each_tracing_cpu(cpu) {
			atomic_dec(&global_trace.data[cpu]->disabled);
		}
		tracing_on();
	}

 out:
	__raw_spin_unlock(&ftrace_dump_lock);
	local_irq_restore(flags);
}

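/*
 * ftrace_dump() may also be called directly as a last-resort debugging
 * aid, e.g. (a hypothetical hack, not code in this file):
 *
 *	if (WARN_ON(broken_state))
 *		ftrace_dump();
 */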
/* By default: disable tracing after the dump */
void ftrace_dump(void)
{
	__ftrace_dump(true);
}

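/*
 * Set up the global (and, under CONFIG_TRACER_MAX_TRACE, the max
 * latency) ring buffers and per-CPU data, then register the nop
 * tracer and the panic/die notifiers.  The buffer starts at its
 * minimum size; it is grown to trace_buf_size later, once something
 * actually uses tracing (see ring_buffer_expanded).
 */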
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int i;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
		goto out_free_tracing_cpumask;

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(tracing_cpumask, cpu_all_mask);
	cpumask_clear(tracing_reader_cpumask);

	/* TODO: make the number of buffers hot pluggable with CPUS */
	global_trace.buffer = ring_buffer_alloc(ring_buf_size,
						TRACE_BUFFER_FLAGS);
	if (!global_trace.buffer) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_cpumask;
	}
	global_trace.entries = ring_buffer_size(global_trace.buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	max_tr.buffer = ring_buffer_alloc(ring_buf_size,
					  TRACE_BUFFER_FLAGS);
	if (!max_tr.buffer) {
		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
		WARN_ON(1);
		ring_buffer_free(global_trace.buffer);
		goto out_free_cpumask;
	}
	max_tr.entries = ring_buffer_size(max_tr.buffer);
	WARN_ON(max_tr.entries != global_trace.entries);
#endif

	/* Point each buffer's data at its per-CPU area */
	for_each_tracing_cpu(i) {
		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
		max_tr.data[i] = &per_cpu(max_data, i);
	}

	trace_init_cmdlines();

	register_tracer(&nop_trace);
	current_trace = &nop_trace;
#ifdef CONFIG_BOOT_TRACER
	register_tracer(&boot_tracer);
#endif
	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	return 0;

out_free_cpumask:
	free_cpumask_var(tracing_reader_cpumask);
out_free_tracing_cpumask:
	free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer is set up in an init section,
	 * which is freed once booting completes.  This function runs
	 * at late_initcall time: if the requested boot tracer never
	 * registered, clear the pointer out so that a later
	 * registration cannot access memory that is about to be
	 * freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

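/*
 * Initcall ordering matters here: the ring buffer must exist before
 * anything can trace (early_initcall), the debugfs files need the VFS
 * to be up (fs_initcall), and the bootup-tracer check has to run after
 * every built-in tracer has had a chance to register (late_initcall).
 */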
early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);