/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring-buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring-buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurs.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 and turns to zero if the initialization
 * of the tracer is successful; that is the only place that sets
 * it back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set to 1 if you want to dump the buffers of all CPUs
 * Set to 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
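
/*
 * Illustrative examples (not from this file) of the two ways the
 * handler above can be driven; the values follow directly from the
 * parser code and from the ftrace_dump_on_oops comment block:
 *
 *	ftrace_dump_on_oops			(command line, DUMP_ALL)
 *	ftrace_dump_on_oops=orig_cpu		(command line, DUMP_ORIG)
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops	(at run time)
 */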

static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
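
/*
 * Worked example (illustrative): ns2usecs() rounds to the nearest
 * microsecond, since 500 is added before the divide by 1000.
 * So ns2usecs(1499) == 1 and ns2usecs(1500) == 2.
 */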

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
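
/*
 * Illustrative usage sketch (hypothetical caller, not from this file):
 * code that wants to pin a trace_array across an operation brackets
 * the access with the refcount helpers above:
 *
 *	if (trace_array_get(tr) < 0)
 *		return -ENODEV;		(the array is gone from the list)
 *	...use tr...
 *	trace_array_put(tr);
 */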

int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" so it can be used in fast paths such
 * as by the irqsoff tracer. But it may be inaccurate due to races. If
 * you need to know the accurate state, use tracing_is_on(), which is
 * a little slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway, this is
 * configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * Serialize access to the ring buffer.
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
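
/*
 * Illustrative usage sketch (hypothetical reader, not from this file):
 * a consuming reader brackets its walk of one cpu's buffer, or of all
 * of them, with the primitives above:
 *
 *	trace_access_lock(cpu);		(or RING_BUFFER_ALL_CPUS)
 *	...consume events from that cpu's ring buffer...
 *	trace_access_unlock(cpu);
 */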

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	  The address of the caller
 * @str:  The constant string to write
 * @size: The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
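
/*
 * Illustrative call (hypothetical, not from this file): __trace_puts()
 * is normally reached through the trace_puts() macro, which supplies
 * the caller's instruction pointer and a string literal, roughly:
 *
 *	trace_puts("hit the slow path\n");
 *
 * The return value is the number of bytes written to the ring buffer.
 */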

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	 The address of the caller
 * @str: The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing,
 * basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
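
/*
 * Illustrative usage sketch (hypothetical, not from this file): a
 * debugging hook can freeze the trace when a rare condition fires,
 * while tracing continues into the live buffer:
 *
 *	if (suspicious_condition)	(hypothetical predicate)
 *		tracing_snapshot();
 *
 * The snapshot buffer must have been allocated first, e.g. with
 * tracing_snapshot_alloc() from sleepable context, or via
 * "echo 1 > /sys/kernel/debug/tracing/snapshot" as noted above.
 */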

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
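
/*
 * Illustrative pairing (hypothetical, not from this file): a debugging
 * hook can stop the buffers around a noisy section and restart them:
 *
 *	tracing_off();
 *	...events in this section are not recorded...
 *	tracing_on();
 *
 * tracing_is_on() reports the real ring buffer state, unlike the
 * racy-but-fast tracing_is_enabled() mirror above.
 */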

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	1 },
	{ trace_clock,		"perf",		1 },
	ARCH_TRACE_CLOCKS
};
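
/*
 * Illustrative note (not from this file): the names above are what the
 * tracefs "trace_clock" file exposes, and a clock is selected at run
 * time with, e.g.:
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */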

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
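
/*
 * Illustrative usage sketch (hypothetical file_operations write handler,
 * not from this file): trace_get_user() is designed to be called from a
 * ->write() method to pull one whitespace-separated token per call:
 *
 *	ret = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (ret >= 0 && trace_parser_loaded(&parser))
 *		...act on the NUL-terminated token in parser.buffer...
 */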

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside the update_max_tr
 * so it needs to be defined outside of the
 * CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly	tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void default_wait_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return;

	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
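
/*
 * Illustrative sketch (hypothetical tracer, not from this file): a
 * minimal plugin fills in a struct tracer and hands it to
 * register_tracer(); the my_* names below are made up:
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	...then, from an __init function:
 *	return register_tracer(&my_tracer);
 */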

void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001297#define SAVED_CMDLINES 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001298#define NO_CMDLINE_MAP UINT_MAX
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001299static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1300static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
1301static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
1302static int cmdline_idx;
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001303static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001304
Steven Rostedt25b0b442008-05-12 21:21:00 +02001305/* temporary disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001306static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001307
1308static void trace_init_cmdlines(void)
1309{
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001310 memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
1311 memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001312 cmdline_idx = 0;
1313}
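
/*
 * Editor's note: the maps initialized above form a small two-way
 * cache. As an illustrative example (the pid and comm values are
 * made up), after trace_save_cmdline() records pid 1234 into slot 7:
 *
 *	map_pid_to_cmdline[1234] == 7
 *	map_cmdline_to_pid[7]    == 1234
 *	saved_cmdlines[7]        == "bash"
 *
 * so both pid-to-comm and slot-to-pid lookups are O(1).
 */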
1314
Carsten Emdeb5130b12009-09-13 01:43:07 +02001315int is_tracing_stopped(void)
1316{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001317 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001318}
1319
Steven Rostedt0f048702008-11-05 16:05:44 -05001320/**
1321 * tracing_start - quick start of the tracer
1322 *
1323 * If tracing is enabled but was stopped by tracing_stop,
1324 * this will start the tracer back up.
1325 */
1326void tracing_start(void)
1327{
1328 struct ring_buffer *buffer;
1329 unsigned long flags;
1330
1331 if (tracing_disabled)
1332 return;
1333
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001334 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1335 if (--global_trace.stop_count) {
1336 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001337 /* Someone screwed up their debugging */
1338 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001339 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001340 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001341 goto out;
1342 }
1343
Steven Rostedta2f80712010-03-12 19:56:00 -05001344 /* Prevent the buffers from switching */
1345 arch_spin_lock(&ftrace_max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001346
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001347 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001348 if (buffer)
1349 ring_buffer_record_enable(buffer);
1350
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001351#ifdef CONFIG_TRACER_MAX_TRACE
1352 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001353 if (buffer)
1354 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001355#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001356
Steven Rostedta2f80712010-03-12 19:56:00 -05001357 arch_spin_unlock(&ftrace_max_lock);
1358
Steven Rostedt0f048702008-11-05 16:05:44 -05001359 ftrace_start();
1360 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001361 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1362}
1363
1364static void tracing_start_tr(struct trace_array *tr)
1365{
1366 struct ring_buffer *buffer;
1367 unsigned long flags;
1368
1369 if (tracing_disabled)
1370 return;
1371
1372 /* If global, we need to also start the max tracer */
1373 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1374 return tracing_start();
1375
1376 raw_spin_lock_irqsave(&tr->start_lock, flags);
1377
1378 if (--tr->stop_count) {
1379 if (tr->stop_count < 0) {
1380 /* Someone screwed up their debugging */
1381 WARN_ON_ONCE(1);
1382 tr->stop_count = 0;
1383 }
1384 goto out;
1385 }
1386
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001387 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001388 if (buffer)
1389 ring_buffer_record_enable(buffer);
1390
1391 out:
1392 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001393}
1394
1395/**
1396 * tracing_stop - quick stop of the tracer
1397 *
1398 * Lightweight way to stop tracing. Use in conjunction with
1399 * tracing_start.
1400 */
1401void tracing_stop(void)
1402{
1403 struct ring_buffer *buffer;
1404 unsigned long flags;
1405
1406 ftrace_stop();
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001407 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1408 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001409 goto out;
1410
Steven Rostedta2f80712010-03-12 19:56:00 -05001411 /* Prevent the buffers from switching */
1412 arch_spin_lock(&ftrace_max_lock);
1413
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001414 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001415 if (buffer)
1416 ring_buffer_record_disable(buffer);
1417
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001418#ifdef CONFIG_TRACER_MAX_TRACE
1419 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001420 if (buffer)
1421 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001422#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001423
Steven Rostedta2f80712010-03-12 19:56:00 -05001424 arch_spin_unlock(&ftrace_max_lock);
1425
Steven Rostedt0f048702008-11-05 16:05:44 -05001426 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001427 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1428}
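
/*
 * Editor's sketch: tracing_stop() and tracing_start() nest through
 * stop_count, so they are used in balanced pairs, e.g.:
 *
 *	tracing_stop();
 *	inspect_trace_buffers();
 *	tracing_start();
 *
 * inspect_trace_buffers() is a hypothetical helper; the point is that
 * the buffers stay frozen until the outermost tracing_start().
 */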
1429
1430static void tracing_stop_tr(struct trace_array *tr)
1431{
1432 struct ring_buffer *buffer;
1433 unsigned long flags;
1434
1435 /* If global, we need to also stop the max tracer */
1436 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1437 return tracing_stop();
1438
1439 raw_spin_lock_irqsave(&tr->start_lock, flags);
1440 if (tr->stop_count++)
1441 goto out;
1442
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001443 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001444 if (buffer)
1445 ring_buffer_record_disable(buffer);
1446
1447 out:
1448 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001449}
1450
Ingo Molnare309b412008-05-12 21:20:51 +02001451void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001452
Ingo Molnare309b412008-05-12 21:20:51 +02001453static void trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001454{
Carsten Emdea635cf02009-03-18 09:00:41 +01001455 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001456
1457 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1458 return;
1459
1460 /*
1461 * It's not the end of the world if we don't get
1462 * the lock, but we also don't want to spin
1463 * nor do we want to disable interrupts,
1464 * so if we miss here, then better luck next time.
1465 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001466 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001467 return;
1468
1469 idx = map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001470 if (idx == NO_CMDLINE_MAP) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001471 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1472
Carsten Emdea635cf02009-03-18 09:00:41 +01001473 /*
1474 * Check whether the cmdline buffer at idx has a pid
1475 * mapped. We are going to overwrite that entry so we
1476 * need to clear the map_pid_to_cmdline. Otherwise we
1477 * would read the new comm for the old pid.
1478 */
1479 pid = map_cmdline_to_pid[idx];
1480 if (pid != NO_CMDLINE_MAP)
1481 map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001482
Carsten Emdea635cf02009-03-18 09:00:41 +01001483 map_cmdline_to_pid[idx] = tsk->pid;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001484 map_pid_to_cmdline[tsk->pid] = idx;
1485
1486 cmdline_idx = idx;
1487 }
1488
1489 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1490
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001491 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001492}
1493
Steven Rostedt4ca530852009-03-16 19:20:15 -04001494void trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001495{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001496 unsigned map;
1497
Steven Rostedt4ca530852009-03-16 19:20:15 -04001498 if (!pid) {
1499 strcpy(comm, "<idle>");
1500 return;
1501 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001502
Steven Rostedt74bf4072010-01-25 15:11:53 -05001503 if (WARN_ON_ONCE(pid < 0)) {
1504 strcpy(comm, "<XXX>");
1505 return;
1506 }
1507
Steven Rostedt4ca530852009-03-16 19:20:15 -04001508 if (pid > PID_MAX_DEFAULT) {
1509 strcpy(comm, "<...>");
1510 return;
1511 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001512
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001513 preempt_disable();
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001514 arch_spin_lock(&trace_cmdline_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001515 map = map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001516 if (map != NO_CMDLINE_MAP)
1517 strcpy(comm, saved_cmdlines[map]);
1518 else
1519 strcpy(comm, "<...>");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001520
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001521 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001522 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001523}
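
/*
 * Editor's sketch of a typical trace_find_cmdline() caller: comm must
 * be at least TASK_COMM_LEN bytes, and pids with no cached comm come
 * back as "<...>":
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *	trace_seq_printf(s, "%16s-%-5d", comm, entry->pid);
 *
 * The trace_seq_printf() format is illustrative, mirroring how the
 * output code prints a task/pid pair.
 */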
1524
Ingo Molnare309b412008-05-12 21:20:51 +02001525void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001526{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001527 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001528 return;
1529
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001530 if (!__this_cpu_read(trace_cmdline_save))
1531 return;
1532
1533 __this_cpu_write(trace_cmdline_save, false);
1534
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001535 trace_save_cmdline(tsk);
1536}
1537
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001538void
Steven Rostedt38697052008-10-01 13:14:09 -04001539tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1540 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001541{
1542 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001543
Steven Rostedt777e2082008-09-29 23:02:42 -04001544 entry->preempt_count = pc & 0xff;
1545 entry->pid = (tsk) ? tsk->pid : 0;
1546 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001547#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001548 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001549#else
1550 TRACE_FLAG_IRQS_NOSUPPORT |
1551#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001552 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1553 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001554 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1555 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001556}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001557EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001558
Steven Rostedte77405a2009-09-02 14:17:06 -04001559struct ring_buffer_event *
1560trace_buffer_lock_reserve(struct ring_buffer *buffer,
1561 int type,
1562 unsigned long len,
1563 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001564{
1565 struct ring_buffer_event *event;
1566
Steven Rostedte77405a2009-09-02 14:17:06 -04001567 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001568 if (event != NULL) {
1569 struct trace_entry *ent = ring_buffer_event_data(event);
1570
1571 tracing_generic_entry_update(ent, flags, pc);
1572 ent->type = type;
1573 }
1574
1575 return event;
1576}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001577
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001578void
1579__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1580{
1581 __this_cpu_write(trace_cmdline_save, true);
1582 ring_buffer_unlock_commit(buffer, event);
1583}
1584
Steven Rostedte77405a2009-09-02 14:17:06 -04001585static inline void
1586__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1587 struct ring_buffer_event *event,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001588 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001589{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001590 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001591
Steven Rostedte77405a2009-09-02 14:17:06 -04001592 ftrace_trace_stack(buffer, flags, 6, pc);
1593 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001594}
1595
Steven Rostedte77405a2009-09-02 14:17:06 -04001596void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1597 struct ring_buffer_event *event,
1598 unsigned long flags, int pc)
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001599{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001600 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001601}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001602EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
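
/*
 * Editor's note: the functions above form the reserve/fill/commit
 * write path. A hedged sketch of an event writer, where
 * TRACE_MY_TYPE, struct my_entry and val are assumptions and not
 * part of this file:
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_MY_TYPE,
 *					  sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->val = val;
 *	trace_buffer_unlock_commit(buffer, event, flags, pc);
 *
 * Real writers in this file, such as trace_function() below, follow
 * exactly this shape.
 */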
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001603
Steven Rostedtef5580d2009-02-27 19:38:04 -05001604struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001605trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1606 struct ftrace_event_file *ftrace_file,
1607 int type, unsigned long len,
1608 unsigned long flags, int pc)
1609{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001610 *current_rb = ftrace_file->tr->trace_buffer.buffer;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001611 return trace_buffer_lock_reserve(*current_rb,
1612 type, len, flags, pc);
1613}
1614EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1615
1616struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001617trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1618 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001619 unsigned long flags, int pc)
1620{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001621 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001622 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001623 type, len, flags, pc);
1624}
Steven Rostedt94487d62009-05-05 19:22:53 -04001625EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001626
Steven Rostedte77405a2009-09-02 14:17:06 -04001627void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1628 struct ring_buffer_event *event,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001629 unsigned long flags, int pc)
1630{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001631 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001632}
Steven Rostedt94487d62009-05-05 19:22:53 -04001633EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001634
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001635void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1636 struct ring_buffer_event *event,
1637 unsigned long flags, int pc,
1638 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001639{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001640 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001641
1642 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1643 ftrace_trace_userstack(buffer, flags, pc);
1644}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001645EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001646
Steven Rostedte77405a2009-09-02 14:17:06 -04001647void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1648 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001649{
Steven Rostedte77405a2009-09-02 14:17:06 -04001650 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001651}
Steven Rostedt12acd472009-04-17 16:01:56 -04001652EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001653
Ingo Molnare309b412008-05-12 21:20:51 +02001654void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001655trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001656 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1657 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001658{
Tom Zanussie1112b42009-03-31 00:48:49 -05001659 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001660 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001661 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001662 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001663
Steven Rostedtd7690412008-10-01 00:29:53 -04001664 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001665 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001666 return;
1667
Steven Rostedte77405a2009-09-02 14:17:06 -04001668 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001669 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001670 if (!event)
1671 return;
1672 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001673 entry->ip = ip;
1674 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001675
Tom Zanussif306cc82013-10-24 08:34:17 -05001676 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001677 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001678}
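
/*
 * Editor's sketch: trace_function() is normally reached from the
 * function tracer's callback, which supplies the traced function's
 * address and its call site. A simplified, hypothetical caller
 * (tr is assumed to be the trace_array being written):
 *
 *	static void my_func_probe(unsigned long ip, unsigned long parent_ip)
 *	{
 *		unsigned long flags;
 *		int pc;
 *
 *		local_save_flags(flags);
 *		pc = preempt_count();
 *		trace_function(tr, ip, parent_ip, flags, pc);
 *	}
 *
 * The real callbacks carry extra arguments; this sketch only shows
 * the data flow into trace_function().
 */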
1679
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001680#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001681
1682#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1683struct ftrace_stack {
1684 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1685};
1686
1687static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1688static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1689
Steven Rostedte77405a2009-09-02 14:17:06 -04001690static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001691 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001692 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001693{
Tom Zanussie1112b42009-03-31 00:48:49 -05001694 struct ftrace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001695 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001696 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001697 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001698 int use_stack;
1699 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001700
1701 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001702 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001703
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001704 /*
1705	 * Since events can happen in NMIs, there's no safe way to
1706	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1707	 * or NMI comes in, it will just have to use the default
1708	 * FTRACE_STACK_ENTRIES.
1709 */
1710 preempt_disable_notrace();
1711
Shan Wei82146522012-11-19 13:21:01 +08001712 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001713 /*
1714 * We don't need any atomic variables, just a barrier.
1715 * If an interrupt comes in, we don't care, because it would
1716 * have exited and put the counter back to what we want.
1717 * We just need a barrier to keep gcc from moving things
1718 * around.
1719 */
1720 barrier();
1721 if (use_stack == 1) {
1722 trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
1723 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1724
1725 if (regs)
1726 save_stack_trace_regs(regs, &trace);
1727 else
1728 save_stack_trace(&trace);
1729
1730 if (trace.nr_entries > size)
1731 size = trace.nr_entries;
1732 } else
1733 /* From now on, use_stack is a boolean */
1734 use_stack = 0;
1735
1736 size *= sizeof(unsigned long);
1737
1738 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1739 sizeof(*entry) + size, flags, pc);
1740 if (!event)
1741 goto out;
1742 entry = ring_buffer_event_data(event);
1743
1744 memset(&entry->caller, 0, size);
1745
1746 if (use_stack)
1747 memcpy(&entry->caller, trace.entries,
1748 trace.nr_entries * sizeof(unsigned long));
1749 else {
1750 trace.max_entries = FTRACE_STACK_ENTRIES;
1751 trace.entries = entry->caller;
1752 if (regs)
1753 save_stack_trace_regs(regs, &trace);
1754 else
1755 save_stack_trace(&trace);
1756 }
1757
1758 entry->size = trace.nr_entries;
1759
Tom Zanussif306cc82013-10-24 08:34:17 -05001760 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001761 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001762
1763 out:
1764 /* Again, don't let gcc optimize things here */
1765 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001766 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001767 preempt_enable_notrace();
1768
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001769}
1770
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001771void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1772 int skip, int pc, struct pt_regs *regs)
1773{
1774 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1775 return;
1776
1777 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1778}
1779
Steven Rostedte77405a2009-09-02 14:17:06 -04001780void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1781 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001782{
1783 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1784 return;
1785
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001786 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001787}
1788
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001789void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1790 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001791{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001792 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001793}
1794
Steven Rostedt03889382009-12-11 09:48:22 -05001795/**
1796 * trace_dump_stack - record a stack backtrace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001797 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001798 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001799void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001800{
1801 unsigned long flags;
1802
1803 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001804 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001805
1806 local_save_flags(flags);
1807
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001808 /*
1809	 * Skip 3 more frames; that lands us at the caller
1810	 * of this function.
1811 */
1812 skip += 3;
1813 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1814 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001815}
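
/*
 * Editor's sketch: trace_dump_stack() can be dropped into a suspect
 * code path to record how execution got there, e.g. from a rare
 * error branch (the condition and MAX_RETRIES are illustrative):
 *
 *	if (unlikely(retries > MAX_RETRIES))
 *		trace_dump_stack(0);
 *
 * Passing 0 records from the immediate caller; a positive skip drops
 * that many extra helper frames.
 */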
1816
Steven Rostedt91e86e52010-11-10 12:56:12 +01001817static DEFINE_PER_CPU(int, user_stack_count);
1818
Steven Rostedte77405a2009-09-02 14:17:06 -04001819void
1820ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001821{
Tom Zanussie1112b42009-03-31 00:48:49 -05001822 struct ftrace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001823 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001824 struct userstack_entry *entry;
1825 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001826
1827 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1828 return;
1829
Steven Rostedtb6345872010-03-12 20:03:30 -05001830 /*
1831	 * NMIs cannot handle page faults, even with fixups.
1832	 * Saving the user stack can (and often does) fault.
1833 */
1834 if (unlikely(in_nmi()))
1835 return;
1836
Steven Rostedt91e86e52010-11-10 12:56:12 +01001837 /*
1838 * prevent recursion, since the user stack tracing may
1839 * trigger other kernel events.
1840 */
1841 preempt_disable();
1842 if (__this_cpu_read(user_stack_count))
1843 goto out;
1844
1845 __this_cpu_inc(user_stack_count);
1846
Steven Rostedte77405a2009-09-02 14:17:06 -04001847 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001848 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001849 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001850 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001851 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001852
Steven Rostedt48659d32009-09-11 11:36:23 -04001853 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001854 memset(&entry->caller, 0, sizeof(entry->caller));
1855
1856 trace.nr_entries = 0;
1857 trace.max_entries = FTRACE_STACK_ENTRIES;
1858 trace.skip = 0;
1859 trace.entries = entry->caller;
1860
1861 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001862 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001863 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001864
Li Zefan1dbd1952010-12-09 15:47:56 +08001865 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001866 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001867 out:
1868 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001869}
1870
Hannes Eder4fd27352009-02-10 19:44:12 +01001871#ifdef UNUSED
1872static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001873{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001874 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001875}
Hannes Eder4fd27352009-02-10 19:44:12 +01001876#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001877
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001878#endif /* CONFIG_STACKTRACE */
1879
Steven Rostedt07d777f2011-09-22 14:01:55 -04001880/* created for use with alloc_percpu */
1881struct trace_buffer_struct {
1882 char buffer[TRACE_BUF_SIZE];
1883};
1884
1885static struct trace_buffer_struct *trace_percpu_buffer;
1886static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1887static struct trace_buffer_struct *trace_percpu_irq_buffer;
1888static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1889
1890/*
1891 * The buffer used depends on the context. There is a per cpu
1892 * buffer for normal context, softirq context, hard irq context and
1893 * for NMI context. This allows for lockless recording.
1894 *
1895 * Note, if the buffers failed to be allocated, then this returns NULL
1896 */
1897static char *get_trace_buf(void)
1898{
1899 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001900
1901 /*
1902 * If we have allocated per cpu buffers, then we do not
1903 * need to do any locking.
1904 */
1905 if (in_nmi())
1906 percpu_buffer = trace_percpu_nmi_buffer;
1907 else if (in_irq())
1908 percpu_buffer = trace_percpu_irq_buffer;
1909 else if (in_softirq())
1910 percpu_buffer = trace_percpu_sirq_buffer;
1911 else
1912 percpu_buffer = trace_percpu_buffer;
1913
1914 if (!percpu_buffer)
1915 return NULL;
1916
Shan Weid8a03492012-11-13 09:53:04 +08001917 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001918}
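
/*
 * Editor's note: get_trace_buf() hands out a per-cpu buffer, so the
 * caller must keep preemption disabled while using it. A minimal
 * sketch, matching how the vprintk paths below use it:
 *
 *	preempt_disable_notrace();
 *	tbuffer = get_trace_buf();
 *	if (tbuffer)
 *		len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
 *	preempt_enable_notrace();
 */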
1919
1920static int alloc_percpu_trace_buffer(void)
1921{
1922 struct trace_buffer_struct *buffers;
1923 struct trace_buffer_struct *sirq_buffers;
1924 struct trace_buffer_struct *irq_buffers;
1925 struct trace_buffer_struct *nmi_buffers;
1926
1927 buffers = alloc_percpu(struct trace_buffer_struct);
1928 if (!buffers)
1929 goto err_warn;
1930
1931 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1932 if (!sirq_buffers)
1933 goto err_sirq;
1934
1935 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1936 if (!irq_buffers)
1937 goto err_irq;
1938
1939 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1940 if (!nmi_buffers)
1941 goto err_nmi;
1942
1943 trace_percpu_buffer = buffers;
1944 trace_percpu_sirq_buffer = sirq_buffers;
1945 trace_percpu_irq_buffer = irq_buffers;
1946 trace_percpu_nmi_buffer = nmi_buffers;
1947
1948 return 0;
1949
1950 err_nmi:
1951 free_percpu(irq_buffers);
1952 err_irq:
1953 free_percpu(sirq_buffers);
1954 err_sirq:
1955 free_percpu(buffers);
1956 err_warn:
1957 WARN(1, "Could not allocate percpu trace_printk buffer");
1958 return -ENOMEM;
1959}
1960
Steven Rostedt81698832012-10-11 10:15:05 -04001961static int buffers_allocated;
1962
Steven Rostedt07d777f2011-09-22 14:01:55 -04001963void trace_printk_init_buffers(void)
1964{
Steven Rostedt07d777f2011-09-22 14:01:55 -04001965 if (buffers_allocated)
1966 return;
1967
1968 if (alloc_percpu_trace_buffer())
1969 return;
1970
1971 pr_info("ftrace: Allocated trace_printk buffers\n");
1972
Steven Rostedtb382ede62012-10-10 21:44:34 -04001973 /* Expand the buffers to set size */
1974 tracing_update_buffers();
1975
Steven Rostedt07d777f2011-09-22 14:01:55 -04001976 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04001977
1978 /*
1979 * trace_printk_init_buffers() can be called by modules.
1980 * If that happens, then we need to start cmdline recording
1981 * directly here. If global_trace.trace_buffer.buffer is already
1982 * allocated here, then this was called by module code.
1983 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001984 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04001985 tracing_start_cmdline_record();
1986}
1987
1988void trace_printk_start_comm(void)
1989{
1990 /* Start tracing comms if trace printk is set */
1991 if (!buffers_allocated)
1992 return;
1993 tracing_start_cmdline_record();
1994}
1995
1996static void trace_printk_start_stop_comm(int enabled)
1997{
1998 if (!buffers_allocated)
1999 return;
2000
2001 if (enabled)
2002 tracing_start_cmdline_record();
2003 else
2004 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002005}
2006
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002007/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002008 * trace_vbprintk - write a binary message to the tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002009 *
2010 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002011int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002012{
Tom Zanussie1112b42009-03-31 00:48:49 -05002013 struct ftrace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002014 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002015 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002016 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002017 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002018 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002019 char *tbuffer;
2020 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002021
2022 if (unlikely(tracing_selftest_running || tracing_disabled))
2023 return 0;
2024
2025 /* Don't pollute graph traces with trace_vprintk internals */
2026 pause_graph_tracing();
2027
2028 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002029 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002030
Steven Rostedt07d777f2011-09-22 14:01:55 -04002031 tbuffer = get_trace_buf();
2032 if (!tbuffer) {
2033 len = 0;
2034 goto out;
2035 }
2036
2037 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2038
2039 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002040 goto out;
2041
Steven Rostedt07d777f2011-09-22 14:01:55 -04002042 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002043 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002044 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002045 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2046 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002047 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002048 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002049 entry = ring_buffer_event_data(event);
2050 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002051 entry->fmt = fmt;
2052
Steven Rostedt07d777f2011-09-22 14:01:55 -04002053 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002054 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002055 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05002056 ftrace_trace_stack(buffer, flags, 6, pc);
2057 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002058
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002059out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002060 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002061 unpause_graph_tracing();
2062
2063 return len;
2064}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002065EXPORT_SYMBOL_GPL(trace_vbprintk);
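
/*
 * Editor's sketch of a varargs front end for trace_vbprintk(); the
 * real front ends live in trace_printk.c, so treat this as
 * illustrative only:
 *
 *	int my_trace_bprintk(unsigned long ip, const char *fmt, ...)
 *	{
 *		va_list ap;
 *		int ret;
 *
 *		va_start(ap, fmt);
 *		ret = trace_vbprintk(ip, fmt, ap);
 *		va_end(ap);
 *		return ret;
 *	}
 */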
2066
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002067static int
2068__trace_array_vprintk(struct ring_buffer *buffer,
2069 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002070{
Tom Zanussie1112b42009-03-31 00:48:49 -05002071 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002072 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002073 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002074 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002075 unsigned long flags;
2076 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002077
2078 if (tracing_disabled || tracing_selftest_running)
2079 return 0;
2080
Steven Rostedt07d777f2011-09-22 14:01:55 -04002081 /* Don't pollute graph traces with trace_vprintk internals */
2082 pause_graph_tracing();
2083
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002084 pc = preempt_count();
2085 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002086
Steven Rostedt07d777f2011-09-22 14:01:55 -04002087
2088 tbuffer = get_trace_buf();
2089 if (!tbuffer) {
2090 len = 0;
2091 goto out;
2092 }
2093
2094 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2095 if (len > TRACE_BUF_SIZE)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002096 goto out;
2097
Steven Rostedt07d777f2011-09-22 14:01:55 -04002098 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002099 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002100 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002101 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002102 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002103 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002104 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002105 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002106
Steven Rostedt07d777f2011-09-22 14:01:55 -04002107 memcpy(&entry->buf, tbuffer, len);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002108 entry->buf[len] = '\0';
Tom Zanussif306cc82013-10-24 08:34:17 -05002109 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002110 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002111 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002112 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002113 out:
2114 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002115 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002116
2117 return len;
2118}
Steven Rostedt659372d2009-09-03 19:11:07 -04002119
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002120int trace_array_vprintk(struct trace_array *tr,
2121 unsigned long ip, const char *fmt, va_list args)
2122{
2123 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2124}
2125
2126int trace_array_printk(struct trace_array *tr,
2127 unsigned long ip, const char *fmt, ...)
2128{
2129 int ret;
2130 va_list ap;
2131
2132 if (!(trace_flags & TRACE_ITER_PRINTK))
2133 return 0;
2134
2135 va_start(ap, fmt);
2136 ret = trace_array_vprintk(tr, ip, fmt, ap);
2137 va_end(ap);
2138 return ret;
2139}
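
/*
 * Editor's sketch: trace_array_printk() targets a specific trace
 * instance rather than the global buffer. Assuming tr was saved when
 * the instance was created, a caller might write:
 *
 *	trace_array_printk(tr, _THIS_IP_, "queue depth %d\n", depth);
 *
 * _THIS_IP_ is the usual way to supply the ip argument; the format
 * string and depth are illustrative.
 */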
2140
2141int trace_array_printk_buf(struct ring_buffer *buffer,
2142 unsigned long ip, const char *fmt, ...)
2143{
2144 int ret;
2145 va_list ap;
2146
2147 if (!(trace_flags & TRACE_ITER_PRINTK))
2148 return 0;
2149
2150 va_start(ap, fmt);
2151 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2152 va_end(ap);
2153 return ret;
2154}
2155
Steven Rostedt659372d2009-09-03 19:11:07 -04002156int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2157{
Steven Rostedta813a152009-10-09 01:41:35 -04002158 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002159}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002160EXPORT_SYMBOL_GPL(trace_vprintk);
2161
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002162static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002163{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002164 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2165
Steven Rostedt5a90f572008-09-03 17:42:51 -04002166 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002167 if (buf_iter)
2168 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002169}
2170
Ingo Molnare309b412008-05-12 21:20:51 +02002171static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002172peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2173 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002174{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002175 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002176 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002177
Steven Rostedtd7690412008-10-01 00:29:53 -04002178 if (buf_iter)
2179 event = ring_buffer_iter_peek(buf_iter, ts);
2180 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002181 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002182 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002183
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002184 if (event) {
2185 iter->ent_size = ring_buffer_event_length(event);
2186 return ring_buffer_event_data(event);
2187 }
2188 iter->ent_size = 0;
2189 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002190}
Steven Rostedtd7690412008-10-01 00:29:53 -04002191
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002192static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002193__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2194 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002195{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002196 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002197 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002198 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002199 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002200 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002201 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002202 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002203 int cpu;
2204
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002205 /*
2206	 * If we are in a per_cpu trace file, don't bother iterating over
2207	 * all cpus; just peek at that cpu directly.
2208 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002209 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002210 if (ring_buffer_empty_cpu(buffer, cpu_file))
2211 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002212 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002213 if (ent_cpu)
2214 *ent_cpu = cpu_file;
2215
2216 return ent;
2217 }
2218
Steven Rostedtab464282008-05-12 21:21:00 +02002219 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002220
2221 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002222 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002223
Steven Rostedtbc21b472010-03-31 19:49:26 -04002224 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002225
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02002226 /*
2227 * Pick the entry with the smallest timestamp:
2228 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002229 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002230 next = ent;
2231 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002232 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002233 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002234 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002235 }
2236 }
2237
Steven Rostedt12b5da32012-03-27 10:43:28 -04002238 iter->ent_size = next_size;
2239
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002240 if (ent_cpu)
2241 *ent_cpu = next_cpu;
2242
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002243 if (ent_ts)
2244 *ent_ts = next_ts;
2245
Steven Rostedtbc21b472010-03-31 19:49:26 -04002246 if (missing_events)
2247 *missing_events = next_lost;
2248
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002249 return next;
2250}
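
/*
 * Editor's note: __find_next_entry() is effectively a k-way merge.
 * Each per-cpu buffer is already ordered by timestamp, so the core
 * of the loop above reduces to:
 *
 *	next = NULL;
 *	for_each_tracing_cpu(cpu) {
 *		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
 *		if (ent && (!next || ts < next_ts)) {
 *			next = ent;
 *			next_ts = ts;
 *		}
 *	}
 *
 * plus the bookkeeping for lost events and entry sizes that the real
 * function also tracks.
 */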
2251
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002252/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002253struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2254 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002255{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002256 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002257}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002258
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002259/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002260void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002261{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002262 iter->ent = __find_next_entry(iter, &iter->cpu,
2263 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002264
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002265 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002266 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002267
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002268 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002269}
2270
Ingo Molnare309b412008-05-12 21:20:51 +02002271static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002272{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002273 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002274 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002275}
2276
Ingo Molnare309b412008-05-12 21:20:51 +02002277static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002278{
2279 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002280 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002281 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002282
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002283 WARN_ON_ONCE(iter->leftover);
2284
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002285 (*pos)++;
2286
2287 /* can't go backwards */
2288 if (iter->idx > i)
2289 return NULL;
2290
2291 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002292 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002293 else
2294 ent = iter;
2295
2296 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002297 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002298
2299 iter->pos = *pos;
2300
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002301 return ent;
2302}
2303
Jason Wessel955b61e2010-08-05 09:22:23 -05002304void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002305{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002306 struct ring_buffer_event *event;
2307 struct ring_buffer_iter *buf_iter;
2308 unsigned long entries = 0;
2309 u64 ts;
2310
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002311 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002312
Steven Rostedt6d158a82012-06-27 20:46:14 -04002313 buf_iter = trace_buffer_iter(iter, cpu);
2314 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002315 return;
2316
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002317 ring_buffer_iter_reset(buf_iter);
2318
2319 /*
2320	 * With the max latency tracers, a reset may never have taken
2321	 * place on a cpu. This is evident from timestamps that fall
2322	 * before the start of the buffer.
2323 */
2324 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002325 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002326 break;
2327 entries++;
2328 ring_buffer_read(buf_iter, NULL);
2329 }
2330
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002331 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002332}
2333
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002334/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002335 * The current tracer is copied to avoid taking a global lock
2336 * all around.
2337 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002338static void *s_start(struct seq_file *m, loff_t *pos)
2339{
2340 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002341 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002342 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002343 void *p = NULL;
2344 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002345 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002346
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002347 /*
2348 * copy the tracer to avoid using a global lock all around.
2349	 * iter->trace is a copy of current_trace; the pointer to the
2350 * name may be used instead of a strcmp(), as iter->trace->name
2351 * will point to the same string as current_trace->name.
2352 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002353 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002354 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2355 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002356 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002357
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002358#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002359 if (iter->snapshot && iter->trace->use_max_tr)
2360 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002361#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002362
2363 if (!iter->snapshot)
2364 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002365
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002366 if (*pos != iter->pos) {
2367 iter->ent = NULL;
2368 iter->cpu = 0;
2369 iter->idx = -1;
2370
Steven Rostedtae3b5092013-01-23 15:22:59 -05002371 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002372 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002373 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002374 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002375 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002376
Lai Jiangshanac91d852010-03-02 17:54:50 +08002377 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002378 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2379 ;
2380
2381 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002382 /*
2383 * If we overflowed the seq_file before, then we want
2384 * to just reuse the trace_seq buffer again.
2385 */
2386 if (iter->leftover)
2387 p = iter;
2388 else {
2389 l = *pos - 1;
2390 p = s_next(m, p, &l);
2391 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002392 }
2393
Lai Jiangshan4f535962009-05-18 19:35:34 +08002394 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002395 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002396 return p;
2397}
2398
2399static void s_stop(struct seq_file *m, void *p)
2400{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002401 struct trace_iterator *iter = m->private;
2402
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002403#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002404 if (iter->snapshot && iter->trace->use_max_tr)
2405 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002406#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002407
2408 if (!iter->snapshot)
2409 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002410
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002411 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002412 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002413}
2414
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002415static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002416get_total_entries(struct trace_buffer *buf,
2417 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002418{
2419 unsigned long count;
2420 int cpu;
2421
2422 *total = 0;
2423 *entries = 0;
2424
2425 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002426 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002427 /*
2428 * If this buffer has skipped entries, then we hold all
2429 * entries for the trace and we need to ignore the
2430 * ones before the time stamp.
2431 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002432 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2433 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002434 /* total is the same as the entries */
2435 *total += count;
2436 } else
2437 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002438 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002439 *entries += count;
2440 }
2441}
2442
Ingo Molnare309b412008-05-12 21:20:51 +02002443static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002444{
Michael Ellermana6168352008-08-20 16:36:11 -07002445 seq_puts(m, "# _------=> CPU# \n");
2446 seq_puts(m, "# / _-----=> irqs-off \n");
2447 seq_puts(m, "# | / _----=> need-resched \n");
2448 seq_puts(m, "# || / _---=> hardirq/softirq \n");
2449 seq_puts(m, "# ||| / _--=> preempt-depth \n");
Steven Rostedte6e1e252011-03-09 10:41:56 -05002450 seq_puts(m, "# |||| / delay \n");
2451 seq_puts(m, "# cmd pid ||||| time | caller \n");
2452 seq_puts(m, "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002453}
2454
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002455static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002456{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002457 unsigned long total;
2458 unsigned long entries;
2459
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002460 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002461 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2462 entries, total, num_online_cpus());
2463 seq_puts(m, "#\n");
2464}
2465
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002466static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002467{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002468 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002469 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
Michael Ellermana6168352008-08-20 16:36:11 -07002470 seq_puts(m, "# | | | | |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002471}
2472
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002473static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05002474{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002475 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002476 seq_puts(m, "# _-----=> irqs-off\n");
2477 seq_puts(m, "# / _----=> need-resched\n");
2478 seq_puts(m, "# | / _---=> hardirq/softirq\n");
2479 seq_puts(m, "# || / _--=> preempt-depth\n");
2480 seq_puts(m, "# ||| / delay\n");
2481 seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
2482 seq_puts(m, "# | | | |||| | |\n");
2483}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002484
Jiri Olsa62b915f2010-04-02 19:01:22 +02002485void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002486print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2487{
2488 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002489 struct trace_buffer *buf = iter->trace_buffer;
2490 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002491 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002492 unsigned long entries;
2493 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002494 const char *name = "preemption";
2495
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002496 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002497
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002498 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002499
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002500 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002501 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002502 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002503 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002504 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002505 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002506 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002507 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002508 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002509 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002510#if defined(CONFIG_PREEMPT_NONE)
2511 "server",
2512#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2513 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002514#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002515 "preempt",
2516#else
2517 "unknown",
2518#endif
2519 /* These are reserved for later use */
2520 0, 0, 0, 0);
2521#ifdef CONFIG_SMP
2522 seq_printf(m, " #P:%d)\n", num_online_cpus());
2523#else
2524 seq_puts(m, ")\n");
2525#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002526 seq_puts(m, "# -----------------\n");
2527 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002528 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002529 data->comm, data->pid,
2530 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002531 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002532 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002533
2534 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002535 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002536 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2537 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002538 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002539 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2540 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002541 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002542 }
2543
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002544 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002545}
2546
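/*
 * The merged view reads from one ring buffer per CPU, and those buffers
 * can overwrite or skip entries at different times.  Annotate the point
 * where a CPU's buffer first contributes, since earlier events from
 * that CPU may have been dropped.
 */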
Steven Rostedta3097202008-11-07 22:36:02 -05002547static void test_cpu_buff_start(struct trace_iterator *iter)
2548{
2549 struct trace_seq *s = &iter->seq;
2550
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002551 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2552 return;
2553
2554 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2555 return;
2556
Rusty Russell44623442009-01-01 10:12:23 +10302557 if (cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05002558 return;
2559
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002560 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002561 return;
2562
Rusty Russell44623442009-01-01 10:12:23 +10302563 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002564
2565 /* Don't print the "buffer started" annotation for the first entry of the trace */
2566 if (iter->idx > 1)
2567 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2568 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05002569}
2570
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002571static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002572{
Steven Rostedt214023c2008-05-12 21:20:46 +02002573 struct trace_seq *s = &iter->seq;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002574 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002575 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002576 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002577
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002578 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002579
Steven Rostedta3097202008-11-07 22:36:02 -05002580 test_cpu_buff_start(iter);
2581
Steven Rostedtf633cef2008-12-23 23:24:13 -05002582 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002583
2584 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt27d48be2009-03-04 21:57:29 -05002585 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2586 if (!trace_print_lat_context(iter))
2587 goto partial;
2588 } else {
2589 if (!trace_print_context(iter))
2590 goto partial;
2591 }
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002592 }
2593
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002594 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002595 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002596
2597 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2598 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002599
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002600 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002601partial:
2602 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002603}
2604
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002605static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002606{
2607 struct trace_seq *s = &iter->seq;
2608 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002609 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002610
2611 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002612
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002613 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002614 if (!trace_seq_printf(s, "%d %d %llu ",
2615 entry->pid, iter->cpu, iter->ts))
2616 goto partial;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002617 }
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002618
Steven Rostedtf633cef2008-12-23 23:24:13 -05002619 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002620 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002621 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002622
2623 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2624 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002625
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002626 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002627partial:
2628 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002629}
2630
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002631static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002632{
2633 struct trace_seq *s = &iter->seq;
2634 unsigned char newline = '\n';
2635 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002636 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002637
2638 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002639
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002640 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2641 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2642 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2643 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2644 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002645
Steven Rostedtf633cef2008-12-23 23:24:13 -05002646 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002647 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04002648 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002649 if (ret != TRACE_TYPE_HANDLED)
2650 return ret;
2651 }
Steven Rostedt7104f302008-10-01 10:52:51 -04002652
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002653 SEQ_PUT_FIELD_RET(s, newline);
2654
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002655 return TRACE_TYPE_HANDLED;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002656}
2657
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002658static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002659{
2660 struct trace_seq *s = &iter->seq;
2661 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002662 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002663
2664 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002665
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002666 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2667 SEQ_PUT_FIELD_RET(s, entry->pid);
Steven Rostedt1830b522009-02-07 19:38:43 -05002668 SEQ_PUT_FIELD_RET(s, iter->cpu);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002669 SEQ_PUT_FIELD_RET(s, iter->ts);
2670 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002671
Steven Rostedtf633cef2008-12-23 23:24:13 -05002672 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04002673 return event ? event->funcs->binary(iter, 0, event) :
2674 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002675}
2676
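/*
 * Returns 1 only when every selected per-cpu buffer (a single CPU for
 * a per_cpu file, otherwise all of them) has no entries left to read.
 */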
Jiri Olsa62b915f2010-04-02 19:01:22 +02002677int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002678{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002679 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002680 int cpu;
2681
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002682 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002683 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002684 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002685 buf_iter = trace_buffer_iter(iter, cpu);
2686 if (buf_iter) {
2687 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002688 return 0;
2689 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002690 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002691 return 0;
2692 }
2693 return 1;
2694 }
2695
Steven Rostedtab464282008-05-12 21:21:00 +02002696 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04002697 buf_iter = trace_buffer_iter(iter, cpu);
2698 if (buf_iter) {
2699 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04002700 return 0;
2701 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002702 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04002703 return 0;
2704 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002705 }
Steven Rostedtd7690412008-10-01 00:29:53 -04002706
Frederic Weisbecker797d3712008-09-30 18:13:45 +02002707 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002708}
2709
Lai Jiangshan4f535962009-05-18 19:35:34 +08002710/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05002711enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002712{
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002713 enum print_line_t ret;
2714
Jiri Olsaee5e51f2011-03-25 12:05:18 +01002715 if (iter->lost_events &&
2716 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2717 iter->cpu, iter->lost_events))
2718 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002719
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002720 if (iter->trace && iter->trace->print_line) {
2721 ret = iter->trace->print_line(iter);
2722 if (ret != TRACE_TYPE_UNHANDLED)
2723 return ret;
2724 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02002725
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05002726 if (iter->ent->type == TRACE_BPUTS &&
2727 trace_flags & TRACE_ITER_PRINTK &&
2728 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2729 return trace_print_bputs_msg_only(iter);
2730
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002731 if (iter->ent->type == TRACE_BPRINT &&
2732 trace_flags & TRACE_ITER_PRINTK &&
2733 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002734 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002735
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002736 if (iter->ent->type == TRACE_PRINT &&
2737 trace_flags & TRACE_ITER_PRINTK &&
2738 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002739 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002740
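 /* Fixed output formats, checked in precedence order */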
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002741 if (trace_flags & TRACE_ITER_BIN)
2742 return print_bin_fmt(iter);
2743
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002744 if (trace_flags & TRACE_ITER_HEX)
2745 return print_hex_fmt(iter);
2746
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002747 if (trace_flags & TRACE_ITER_RAW)
2748 return print_raw_fmt(iter);
2749
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002750 return print_trace_fmt(iter);
2751}
2752
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002753void trace_latency_header(struct seq_file *m)
2754{
2755 struct trace_iterator *iter = m->private;
2756
2757 /* print nothing if the buffers are empty */
2758 if (trace_empty(iter))
2759 return;
2760
2761 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2762 print_trace_header(m, iter);
2763
2764 if (!(trace_flags & TRACE_ITER_VERBOSE))
2765 print_lat_help_header(m);
2766}
2767
Jiri Olsa62b915f2010-04-02 19:01:22 +02002768void trace_default_header(struct seq_file *m)
2769{
2770 struct trace_iterator *iter = m->private;
2771
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002772 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2773 return;
2774
Jiri Olsa62b915f2010-04-02 19:01:22 +02002775 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2776 /* print nothing if the buffers are empty */
2777 if (trace_empty(iter))
2778 return;
2779 print_trace_header(m, iter);
2780 if (!(trace_flags & TRACE_ITER_VERBOSE))
2781 print_lat_help_header(m);
2782 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002783 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2784 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002785 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002786 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002787 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002788 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02002789 }
2790}
2791
Steven Rostedte0a413f2011-09-29 21:26:16 -04002792static void test_ftrace_alive(struct seq_file *m)
2793{
2794 if (!ftrace_is_dead())
2795 return;
2796 seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2797 seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
2798}
2799
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002800#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002801static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002802{
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002803 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2804 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2805 seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
Wang YanQingb9be6d02013-09-14 12:59:16 +08002806 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002807 seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
2808 seq_printf(m, "#                       is not a '0' or '1')\n");
2809}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002810
2811static void show_snapshot_percpu_help(struct seq_file *m)
2812{
2813 seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2814#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2815 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2816 seq_printf(m, "#                      Takes a snapshot of the main buffer for this cpu.\n");
2817#else
2818 seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2819 seq_printf(m, "#                     Must use main snapshot file to allocate.\n");
2820#endif
2821 seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2822 seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
2823 seq_printf(m, "#                       is not a '0' or '1')\n");
2824}
2825
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002826static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2827{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05002828 if (iter->tr->allocated_snapshot)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002829 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2830 else
2831 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2832
2833 seq_printf(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002834 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2835 show_snapshot_main_help(m);
2836 else
2837 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002838}
2839#else
2840/* Should never be called */
2841static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2842#endif
2843
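/*
 * seq_file show handler for the "trace" file.  Besides printing the
 * headers and one formatted entry, it copes with seq_file overflow: a
 * line that did not fit is kept in iter->leftover and emitted again on
 * the next call.
 */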
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002844static int s_show(struct seq_file *m, void *v)
2845{
2846 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002847 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002848
2849 if (iter->ent == NULL) {
2850 if (iter->tr) {
2851 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2852 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002853 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002854 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002855 if (iter->snapshot && trace_empty(iter))
2856 print_snapshot_help(m, iter);
2857 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002858 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02002859 else
2860 trace_default_header(m);
2861
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002862 } else if (iter->leftover) {
2863 /*
2864 * If we filled the seq_file buffer earlier, we
2865 * want to just show it now.
2866 */
2867 ret = trace_print_seq(m, &iter->seq);
2868
2869 /* ret should this time be zero, but you never know */
2870 iter->leftover = ret;
2871
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002872 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002873 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002874 ret = trace_print_seq(m, &iter->seq);
2875 /*
2876 * If we overflow the seq_file buffer, then it will
2877 * ask us for this data again at start up.
2878 * Use that instead.
2879 * ret is 0 if seq_file write succeeded.
2880 * -1 otherwise.
2881 */
2882 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002883 }
2884
2885 return 0;
2886}
2887
Oleg Nesterov649e9c702013-07-23 17:25:54 +02002888/*
2889 * Should be used after trace_array_get(); trace_types_lock
2890 * ensures that i_cdev was already initialized.  Per-cpu files
 * store cpu + 1 in i_cdev (see trace_create_cpu_file()), so a NULL
 * i_cdev maps to RING_BUFFER_ALL_CPUS.
2891 */
2892static inline int tracing_get_cpu(struct inode *inode)
2893{
2894 if (inode->i_cdev) /* See trace_create_cpu_file() */
2895 return (long)inode->i_cdev - 1;
2896 return RING_BUFFER_ALL_CPUS;
2897}
2898
James Morris88e9d342009-09-22 16:43:43 -07002899static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002900 .start = s_start,
2901 .next = s_next,
2902 .stop = s_stop,
2903 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002904};
2905
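/*
 * Build the iterator behind the "trace" and "snapshot" files.  Unless
 * the snapshot is being opened, tracing is stopped for the duration of
 * the read so the iterator walks a stable buffer; tracing_release()
 * restarts it.
 */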
Ingo Molnare309b412008-05-12 21:20:51 +02002906static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02002907__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002908{
Oleg Nesterov6484c712013-07-23 17:26:10 +02002909 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002910 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02002911 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002912
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002913 if (tracing_disabled)
2914 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02002915
Jiri Olsa50e18b92012-04-25 10:23:39 +02002916 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002917 if (!iter)
2918 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002919
Steven Rostedt6d158a82012-06-27 20:46:14 -04002920 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2921 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03002922 if (!iter->buffer_iter)
2923 goto release;
2924
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002925 /*
2926 * We make a copy of the current tracer to avoid concurrent
2927 * changes on it while we are reading.
2928 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002929 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002930 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002931 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002932 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002933
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002934 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002935
Li Zefan79f55992009-06-15 14:58:26 +08002936 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002937 goto fail;
2938
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002939 iter->tr = tr;
2940
2941#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002942 /* Currently only the top directory has a snapshot */
2943 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002944 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002945 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002946#endif
2947 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002948 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002949 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02002950 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002951 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002952
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002953 /* Notify the tracer early, before we stop tracing. */
2954 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01002955 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002956
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002957 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002958 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002959 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2960
David Sharp8be07092012-11-13 12:18:22 -08002961 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09002962 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08002963 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2964
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002965 /* stop the trace while dumping if we are not opening "snapshot" */
2966 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002967 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002968
Steven Rostedtae3b5092013-01-23 15:22:59 -05002969 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002970 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002971 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002972 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07002973 }
2974 ring_buffer_read_prepare_sync();
2975 for_each_tracing_cpu(cpu) {
2976 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002977 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002978 }
2979 } else {
2980 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002981 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002982 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07002983 ring_buffer_read_prepare_sync();
2984 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002985 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002986 }
2987
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002988 mutex_unlock(&trace_types_lock);
2989
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002990 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002991
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002992 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002993 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002994 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04002995 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03002996release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02002997 seq_release_private(inode, file);
2998 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002999}
3000
3001int tracing_open_generic(struct inode *inode, struct file *filp)
3002{
Steven Rostedt60a11772008-05-12 21:20:44 +02003003 if (tracing_disabled)
3004 return -ENODEV;
3005
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003006 filp->private_data = inode->i_private;
3007 return 0;
3008}
3009
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003010bool tracing_is_disabled(void)
3011{
3012 return (tracing_disabled) ? true : false;
3013}
3014
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003015/*
3016 * Open and update trace_array ref count.
3017 * Must have the current trace_array passed to it.
3018 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003019static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003020{
3021 struct trace_array *tr = inode->i_private;
3022
3023 if (tracing_disabled)
3024 return -ENODEV;
3025
3026 if (trace_array_get(tr) < 0)
3027 return -ENODEV;
3028
3029 filp->private_data = inode->i_private;
3030
3031 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003032}
3033
Hannes Eder4fd27352009-02-10 19:44:12 +01003034static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003035{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003036 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003037 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003038 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003039 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003040
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003041 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003042 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003043 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003044 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003045
Oleg Nesterov6484c712013-07-23 17:26:10 +02003046 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003047 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003048 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003049
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003050 for_each_tracing_cpu(cpu) {
3051 if (iter->buffer_iter[cpu])
3052 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3053 }
3054
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003055 if (iter->trace && iter->trace->close)
3056 iter->trace->close(iter);
3057
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003058 if (!iter->snapshot)
3059 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003060 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003061
3062 __trace_array_put(tr);
3063
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003064 mutex_unlock(&trace_types_lock);
3065
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003066 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003067 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003068 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003069 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003070 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003071
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003072 return 0;
3073}
3074
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003075static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3076{
3077 struct trace_array *tr = inode->i_private;
3078
3079 trace_array_put(tr);
3080 return 0;
3081}
3082
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003083static int tracing_single_release_tr(struct inode *inode, struct file *file)
3084{
3085 struct trace_array *tr = inode->i_private;
3086
3087 trace_array_put(tr);
3088
3089 return single_release(inode, file);
3090}
3091
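/*
 * Open handler for the "trace" file.  An open for write with O_TRUNC
 * erases the buffer (one CPU for a per_cpu file, otherwise all of
 * them); an open for read builds the full iterator above.
 */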
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003092static int tracing_open(struct inode *inode, struct file *file)
3093{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003094 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003095 struct trace_iterator *iter;
3096 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003097
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003098 if (trace_array_get(tr) < 0)
3099 return -ENODEV;
3100
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003101 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003102 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3103 int cpu = tracing_get_cpu(inode);
3104
3105 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003106 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003107 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003108 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003109 }
3110
3111 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003112 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003113 if (IS_ERR(iter))
3114 ret = PTR_ERR(iter);
3115 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3116 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3117 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003118
3119 if (ret < 0)
3120 trace_array_put(tr);
3121
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003122 return ret;
3123}
3124
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003125/*
3126 * Some tracers are not suitable for instance buffers.
3127 * A tracer is always available for the global array (toplevel)
3128 * or if it explicitly states that it is.
3129 */
3130static bool
3131trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3132{
3133 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3134}
3135
3136/* Find the next tracer that this trace array may use */
3137static struct tracer *
3138get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3139{
3140 while (t && !trace_ok_for_array(t, tr))
3141 t = t->next;
3142
3143 return t;
3144}
3145
Ingo Molnare309b412008-05-12 21:20:51 +02003146static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003147t_next(struct seq_file *m, void *v, loff_t *pos)
3148{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003149 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003150 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003151
3152 (*pos)++;
3153
3154 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003155 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003156
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003157 return t;
3158}
3159
3160static void *t_start(struct seq_file *m, loff_t *pos)
3161{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003162 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003163 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003164 loff_t l = 0;
3165
3166 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003167
3168 t = get_tracer_for_array(tr, trace_types);
3169 for (; t && l < *pos; t = t_next(m, t, &l))
3170 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003171
3172 return t;
3173}
3174
3175static void t_stop(struct seq_file *m, void *p)
3176{
3177 mutex_unlock(&trace_types_lock);
3178}
3179
3180static int t_show(struct seq_file *m, void *v)
3181{
3182 struct tracer *t = v;
3183
3184 if (!t)
3185 return 0;
3186
3187 seq_printf(m, "%s", t->name);
3188 if (t->next)
3189 seq_putc(m, ' ');
3190 else
3191 seq_putc(m, '\n');
3192
3193 return 0;
3194}
3195
James Morris88e9d342009-09-22 16:43:43 -07003196static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003197 .start = t_start,
3198 .next = t_next,
3199 .stop = t_stop,
3200 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003201};
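/*
 * Standard seq_file plumbing: t_start() takes trace_types_lock and
 * walks to *pos, t_next() advances, t_stop() unlocks, and t_show()
 * prints one tracer name.  Together they produce the space-separated
 * list read from "available_tracers".
 */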
3202
3203static int show_traces_open(struct inode *inode, struct file *file)
3204{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003205 struct trace_array *tr = inode->i_private;
3206 struct seq_file *m;
3207 int ret;
3208
Steven Rostedt60a11772008-05-12 21:20:44 +02003209 if (tracing_disabled)
3210 return -ENODEV;
3211
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003212 ret = seq_open(file, &show_traces_seq_ops);
3213 if (ret)
3214 return ret;
3215
3216 m = file->private_data;
3217 m->private = tr;
3218
3219 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003220}
3221
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003222static ssize_t
3223tracing_write_stub(struct file *filp, const char __user *ubuf,
3224 size_t count, loff_t *ppos)
3225{
3226 return count;
3227}
3228
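/*
 * Seeking is only meaningful for readers (seq_file handles it); for a
 * write-only open the position is simply reset to zero.
 */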
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003229loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003230{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003231 int ret;
3232
Slava Pestov364829b2010-11-24 15:13:16 -08003233 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003234 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003235 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003236 file->f_pos = ret = 0;
3237
3238 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003239}
3240
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003241static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003242 .open = tracing_open,
3243 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003244 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003245 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003246 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003247};
3248
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003249static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003250 .open = show_traces_open,
3251 .read = seq_read,
3252 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003253 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003254};
3255
Ingo Molnar36dfe922008-05-12 21:20:52 +02003256/*
Ingo Molnar36dfe922008-05-12 21:20:52 +02003257 * The tracer itself will not take this lock, but still we want
3258 * to provide a consistent cpumask to user-space:
3259 */
3260static DEFINE_MUTEX(tracing_cpumask_update_lock);
3261
3262/*
3263 * Temporary storage for the character representation of the
3264 * CPU bitmask (and one more byte for the newline):
3265 */
3266static char mask_str[NR_CPUS + 1];
3267
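/*
 * A sketch of the user-space side (the mask is hex, parsed by
 * cpumask_parse_user() below), e.g. to limit tracing to CPUs 0 and 1:
 *
 *	# echo 3 > tracing_cpumask
 */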
Ingo Molnarc7078de2008-05-12 21:20:52 +02003268static ssize_t
3269tracing_cpumask_read(struct file *filp, char __user *ubuf,
3270 size_t count, loff_t *ppos)
3271{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003272 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003273 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003274
3275 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003276
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003277 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003278 if (count - len < 2) {
3279 count = -EINVAL;
3280 goto out_err;
3281 }
3282 len += sprintf(mask_str + len, "\n");
3283 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3284
3285out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003286 mutex_unlock(&tracing_cpumask_update_lock);
3287
3288 return count;
3289}
3290
3291static ssize_t
3292tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3293 size_t count, loff_t *ppos)
3294{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003295 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303296 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003297 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303298
3299 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3300 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003301
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303302 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003303 if (err)
3304 goto err_unlock;
3305
Li Zefan215368e2009-06-15 10:56:42 +08003306 mutex_lock(&tracing_cpumask_update_lock);
3307
Steven Rostedta5e25882008-12-02 15:34:05 -05003308 local_irq_disable();
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01003309 arch_spin_lock(&ftrace_max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02003310 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003311 /*
3312 * Increase/decrease the disabled counter if we are
3313 * about to flip a bit in the cpumask:
3314 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003315 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303316 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003317 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3318 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003319 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003320 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303321 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003322 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3323 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003324 }
3325 }
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01003326 arch_spin_unlock(&ftrace_max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05003327 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02003328
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003329 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003330
Ingo Molnarc7078de2008-05-12 21:20:52 +02003331 mutex_unlock(&tracing_cpumask_update_lock);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303332 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003333
Ingo Molnarc7078de2008-05-12 21:20:52 +02003334 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003335
3336err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08003337 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003338
3339 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003340}
3341
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003342static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003343 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003344 .read = tracing_cpumask_read,
3345 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003346 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003347 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003348};
3349
Li Zefanfdb372e2009-12-08 11:15:59 +08003350static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003351{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003352 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003353 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003354 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003355 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003356
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003357 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003358 tracer_flags = tr->current_trace->flags->val;
3359 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003360
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003361 for (i = 0; trace_options[i]; i++) {
3362 if (trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003363 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003364 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003365 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003366 }
3367
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003368 for (i = 0; trace_opts[i].name; i++) {
3369 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003370 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003371 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003372 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003373 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003374 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003375
Li Zefanfdb372e2009-12-08 11:15:59 +08003376 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003377}
3378
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003379static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003380 struct tracer_flags *tracer_flags,
3381 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003382{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003383 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003384 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003385
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003386 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003387 if (ret)
3388 return ret;
3389
3390 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003391 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003392 else
Zhaolei77708412009-08-07 18:53:21 +08003393 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003394 return 0;
3395}
3396
Li Zefan8d18eaa2009-12-08 11:17:06 +08003397/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003398static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003399{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003400 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003401 struct tracer_flags *tracer_flags = trace->flags;
3402 struct tracer_opt *opts = NULL;
3403 int i;
3404
3405 for (i = 0; tracer_flags->opts[i].name; i++) {
3406 opts = &tracer_flags->opts[i];
3407
3408 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003409 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003410 }
3411
3412 return -EINVAL;
3413}
3414
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003415/* Some tracers require overwrite to stay enabled */
3416int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3417{
3418 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3419 return -1;
3420
3421 return 0;
3422}
3423
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003424int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003425{
3426 /* do nothing if flag is already set */
3427 if (!!(trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003428 return 0;
3429
3430 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003431 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05003432 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003433 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003434
3435 if (enabled)
3436 trace_flags |= mask;
3437 else
3438 trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003439
3440 if (mask == TRACE_ITER_RECORD_CMD)
3441 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003442
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003443 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003444 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003445#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003446 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003447#endif
3448 }
Steven Rostedt81698832012-10-11 10:15:05 -04003449
3450 if (mask == TRACE_ITER_PRINTK)
3451 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003452
3453 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003454}
3455
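/*
 * Apply one option name written to the "trace_options" file.  A
 * leading "no" clears the flag (e.g. "nooverwrite"); a name that is
 * not a core option is offered to the current tracer's private flags.
 */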
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003456static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003457{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003458 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003459 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003460 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003461 int i;
3462
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003463 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003464
Li Zefan8d18eaa2009-12-08 11:17:06 +08003465 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003466 neg = 1;
3467 cmp += 2;
3468 }
3469
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003470 mutex_lock(&trace_types_lock);
3471
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003472 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003473 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003474 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003475 break;
3476 }
3477 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003478
3479 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003480 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003481 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003482
3483 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003484
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003485 return ret;
3486}
3487
3488static ssize_t
3489tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3490 size_t cnt, loff_t *ppos)
3491{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003492 struct seq_file *m = filp->private_data;
3493 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003494 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003495 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003496
3497 if (cnt >= sizeof(buf))
3498 return -EINVAL;
3499
3500 if (copy_from_user(&buf, ubuf, cnt))
3501 return -EFAULT;
3502
Steven Rostedta8dd2172013-01-09 20:54:17 -05003503 buf[cnt] = 0;
3504
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003505 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003506 if (ret < 0)
3507 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003508
Jiri Olsacf8517c2009-10-23 19:36:16 -04003509 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003510
3511 return cnt;
3512}
3513
Li Zefanfdb372e2009-12-08 11:15:59 +08003514static int tracing_trace_options_open(struct inode *inode, struct file *file)
3515{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003516 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003517 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003518
Li Zefanfdb372e2009-12-08 11:15:59 +08003519 if (tracing_disabled)
3520 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003521
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003522 if (trace_array_get(tr) < 0)
3523 return -ENODEV;
3524
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003525 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3526 if (ret < 0)
3527 trace_array_put(tr);
3528
3529 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003530}
3531
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003532static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003533 .open = tracing_trace_options_open,
3534 .read = seq_read,
3535 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003536 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003537 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003538};
3539
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003540static const char readme_msg[] =
3541 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003542 "# echo 0 > tracing_on : quick way to disable tracing\n"
3543 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3544 " Important files:\n"
3545 " trace\t\t\t- The static contents of the buffer\n"
3546 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3547 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3548 " current_tracer\t- function and latency tracers\n"
3549 " available_tracers\t- list of configured tracers for current_tracer\n"
3550 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3551 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3552 " trace_clock\t\t-change the clock used to order events\n"
3553 " local: Per cpu clock but may not be synced across CPUs\n"
3554 " global: Synced across CPUs but slows tracing down.\n"
3555 " counter: Not a clock, but just an increment\n"
3556 " uptime: Jiffy counter from time of boot\n"
3557 " perf: Same clock that perf events use\n"
3558#ifdef CONFIG_X86_64
3559 " x86-tsc: TSC cycle counter\n"
3560#endif
3561 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3562 " tracing_cpumask\t- Limit which CPUs to trace\n"
3563 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3564 "\t\t\t Remove sub-buffer with rmdir\n"
3565 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003566 "\t\t\t Disable an option by adding the prefix 'no' to the\n"
3567 "\t\t\t option name (e.g. 'nooverwrite')\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003568#ifdef CONFIG_DYNAMIC_FTRACE
3569 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003570 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3571 "\t\t\t functions\n"
3572 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3573 "\t modules: Can select a group via module\n"
3574 "\t Format: :mod:<module-name>\n"
3575 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3576 "\t triggers: a command to perform when function is hit\n"
3577 "\t Format: <function>:<trigger>[:count]\n"
3578 "\t trigger: traceon, traceoff\n"
3579 "\t\t enable_event:<system>:<event>\n"
3580 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003581#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003582 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003583#endif
3584#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003585 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003586#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003587 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3588 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3589 "\t The first one will disable tracing every time do_fault is hit\n"
3590 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3591 "\t The first time do trap is hit and it disables tracing, the\n"
3592 "\t counter will decrement to 2. If tracing is already disabled,\n"
3593 "\t the counter will not decrement. It only decrements when the\n"
3594 "\t trigger did work\n"
3595 "\t To remove trigger without count:\n"
3596 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3597 "\t To remove trigger with a count:\n"
3598 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003599 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003600 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3601 "\t modules: Can select a group via module command :mod:\n"
3602 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003603#endif /* CONFIG_DYNAMIC_FTRACE */
3604#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003605 " set_ftrace_pid\t- Write pid(s) to restrict function tracing to those pids\n"
3606 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003607#endif
3608#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3609 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3610 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3611#endif
3612#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003613 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3614 "\t\t\t snapshot buffer. Read the contents for more\n"
3615 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003616#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003617#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003618 " stack_trace\t\t- Shows the max stack trace when active\n"
3619 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003620 "\t\t\t Write into this file to reset the max size (trigger a\n"
3621 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003622#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003623 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3624 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003625#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003626#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003627 " events/\t\t- Directory containing all trace event subsystems:\n"
3628 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3629 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003630 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3631 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003632 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003633 " events/<system>/<event>/\t- Directory containing control files for\n"
3634 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003635 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3636 " filter\t\t- If set, only events passing filter are traced\n"
3637 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003638 "\t Format: <trigger>[:count][if <filter>]\n"
3639 "\t trigger: traceon, traceoff\n"
3640 "\t enable_event:<system>:<event>\n"
3641 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003642#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003643 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003644#endif
3645#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003646 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003647#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003648 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3649 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3650 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3651 "\t events/block/block_unplug/trigger\n"
3652 "\t The first disables tracing every time block_unplug is hit.\n"
3653 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3654 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3655	"\t   is hit and has a value greater than 1 for the 'nr_rq' event field.\n"
3656 "\t Like function triggers, the counter is only decremented if it\n"
3657 "\t enabled or disabled tracing.\n"
3658 "\t To remove a trigger without a count:\n"
3659	"\t     echo '!<trigger>' > <system>/<event>/trigger\n"
3660	"\t    To remove a trigger with a count:\n"
3661	"\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
3662 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003663;
3664
3665static ssize_t
3666tracing_readme_read(struct file *filp, char __user *ubuf,
3667 size_t cnt, loff_t *ppos)
3668{
3669 return simple_read_from_buffer(ubuf, cnt, ppos,
3670 readme_msg, strlen(readme_msg));
3671}
3672
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003673static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003674 .open = tracing_open_generic,
3675 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003676 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003677};
3678
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003679static ssize_t
Avadh Patel69abe6a2009-04-10 16:04:48 -04003680tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
3681 size_t cnt, loff_t *ppos)
3682{
3683 char *buf_comm;
3684 char *file_buf;
3685 char *buf;
3686 int len = 0;
3687 int pid;
3688 int i;
3689
3690 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
3691 if (!file_buf)
3692 return -ENOMEM;
3693
3694 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
3695 if (!buf_comm) {
3696 kfree(file_buf);
3697 return -ENOMEM;
3698 }
3699
3700 buf = file_buf;
3701
3702 for (i = 0; i < SAVED_CMDLINES; i++) {
3703 int r;
3704
3705 pid = map_cmdline_to_pid[i];
3706 if (pid == -1 || pid == NO_CMDLINE_MAP)
3707 continue;
3708
3709 trace_find_cmdline(pid, buf_comm);
3710 r = sprintf(buf, "%d %s\n", pid, buf_comm);
3711 buf += r;
3712 len += r;
3713 }
3714
3715 len = simple_read_from_buffer(ubuf, cnt, ppos,
3716 file_buf, len);
3717
3718 kfree(file_buf);
3719 kfree(buf_comm);
3720
3721 return len;
3722}
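
/*
 * A hedged userspace sketch of consuming the saved_cmdlines file
 * served by the handler above. Each line has the form "<pid> <comm>"
 * (see the sprintf() format). The mount point below is an assumption;
 * debugfs may live elsewhere.
 */
#include <stdio.h>

int main(void)
{
	char line[64];
	FILE *f = fopen("/sys/kernel/debug/tracing/saved_cmdlines", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))	/* e.g. "1234 bash" */
		fputs(line, stdout);
	fclose(f);
	return 0;
}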
3723
3724static const struct file_operations tracing_saved_cmdlines_fops = {
3725 .open = tracing_open_generic,
3726 .read = tracing_saved_cmdlines_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003727 .llseek = generic_file_llseek,
Avadh Patel69abe6a2009-04-10 16:04:48 -04003728};
3729
3730static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003731tracing_set_trace_read(struct file *filp, char __user *ubuf,
3732 size_t cnt, loff_t *ppos)
3733{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003734 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08003735 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003736 int r;
3737
3738 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003739 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003740 mutex_unlock(&trace_types_lock);
3741
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003742 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003743}
3744
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003745int tracer_init(struct tracer *t, struct trace_array *tr)
3746{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003747 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003748 return t->init(tr);
3749}
3750
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003751static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003752{
3753 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05003754
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003755 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003756 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003757}
3758
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003759#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09003760	/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003761static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3762 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09003763{
3764 int cpu, ret = 0;
3765
3766 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3767 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003768 ret = ring_buffer_resize(trace_buf->buffer,
3769 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003770 if (ret < 0)
3771 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003772 per_cpu_ptr(trace_buf->data, cpu)->entries =
3773 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003774 }
3775 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003776 ret = ring_buffer_resize(trace_buf->buffer,
3777 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003778 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003779 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3780 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003781 }
3782
3783 return ret;
3784}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003785#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09003786
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003787static int __tracing_resize_ring_buffer(struct trace_array *tr,
3788 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04003789{
3790 int ret;
3791
3792 /*
3793	 * If the kernel or user changes the size of the ring buffer,
Steven Rostedta123c522009-03-12 11:21:08 -04003794 * we use the size that was given, and we can forget about
3795 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04003796 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05003797 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04003798
Steven Rostedtb382ede62012-10-10 21:44:34 -04003799 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003800 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04003801 return 0;
3802
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003803 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003804 if (ret < 0)
3805 return ret;
3806
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003807#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003808 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3809 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003810 goto out;
3811
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003812 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003813 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003814 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3815 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003816 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04003817 /*
3818	 * AARGH! We are left with a
3819	 * different sized max buffer!
3820	 * The max buffer is our "snapshot" buffer.
3821	 * When a tracer needs a snapshot (one of the
3822	 * latency tracers), it swaps the max buffer
3823	 * with the saved snapshot. We succeeded in
3824	 * updating the size of the main buffer, but failed to
3825 * update the size of the max buffer. But when we tried
3826 * to reset the main buffer to the original size, we
3827 * failed there too. This is very unlikely to
3828 * happen, but if it does, warn and kill all
3829 * tracing.
3830 */
Steven Rostedt73c51622009-03-11 13:42:01 -04003831 WARN_ON(1);
3832 tracing_disabled = 1;
3833 }
3834 return ret;
3835 }
3836
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003837 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003838 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003839 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003840 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003841
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003842 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003843#endif /* CONFIG_TRACER_MAX_TRACE */
3844
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003845 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003846 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003847 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003848 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04003849
3850 return ret;
3851}
3852
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003853static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
3854 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003855{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07003856 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003857
3858 mutex_lock(&trace_types_lock);
3859
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003860 if (cpu_id != RING_BUFFER_ALL_CPUS) {
3861	/* make sure this cpu is enabled in the mask */
3862 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3863 ret = -EINVAL;
3864 goto out;
3865 }
3866 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003867
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003868 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003869 if (ret < 0)
3870 ret = -ENOMEM;
3871
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003872out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003873 mutex_unlock(&trace_types_lock);
3874
3875 return ret;
3876}
3877
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003878
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003879/**
3880 * tracing_update_buffers - used by tracing facility to expand ring buffers
3881 *
3882 * To save on memory when the tracing is never used on a system with it
3883	 * To save memory when tracing is never used on a system that has it
3884	 * configured in, the ring buffers are set to a minimum size. But once
3885	 * a user starts to use the tracing facility, they need to grow
3886 *
3887 * This function is to be called when a tracer is about to be used.
3888 */
3889int tracing_update_buffers(void)
3890{
3891 int ret = 0;
3892
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003893 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003894 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003895 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003896 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003897 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003898
3899 return ret;
3900}
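
/*
 * A minimal sketch of the calling convention documented above; the
 * surrounding function is hypothetical. Callers are expected to check
 * the return value before using the buffers, since the expansion can
 * fail with -ENOMEM.
 */
static int example_prepare_tracing(void)
{
	int ret;

	ret = tracing_update_buffers();	/* grow buffers if still minimal */
	if (ret < 0)
		return ret;

	/* ... the ring buffers are now at their working size ... */
	return 0;
}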
3901
Steven Rostedt577b7852009-02-26 23:43:05 -05003902struct trace_option_dentry;
3903
3904static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003905create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05003906
3907static void
3908destroy_trace_option_files(struct trace_option_dentry *topts);
3909
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05003910/*
3911 * Used to clear out the tracer before deletion of an instance.
3912 * Must have trace_types_lock held.
3913 */
3914static void tracing_set_nop(struct trace_array *tr)
3915{
3916 if (tr->current_trace == &nop_trace)
3917 return;
3918
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05003919 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05003920
3921 if (tr->current_trace->reset)
3922 tr->current_trace->reset(tr);
3923
3924 tr->current_trace = &nop_trace;
3925}
3926
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003927static int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003928{
Steven Rostedt577b7852009-02-26 23:43:05 -05003929 static struct trace_option_dentry *topts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003930 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003931#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05003932 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003933#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01003934 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003935
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003936 mutex_lock(&trace_types_lock);
3937
Steven Rostedt73c51622009-03-11 13:42:01 -04003938 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003939 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003940 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04003941 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01003942 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04003943 ret = 0;
3944 }
3945
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003946 for (t = trace_types; t; t = t->next) {
3947 if (strcmp(t->name, buf) == 0)
3948 break;
3949 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02003950 if (!t) {
3951 ret = -EINVAL;
3952 goto out;
3953 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003954 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003955 goto out;
3956
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003957 /* Some tracers are only allowed for the top level buffer */
3958 if (!trace_ok_for_array(t, tr)) {
3959 ret = -EINVAL;
3960 goto out;
3961 }
3962
Steven Rostedt9f029e82008-11-12 15:24:24 -05003963 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003964
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05003965 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003966
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003967 if (tr->current_trace->reset)
3968 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05003969
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003970 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003971 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05003972
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05003973#ifdef CONFIG_TRACER_MAX_TRACE
3974 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05003975
3976 if (had_max_tr && !t->use_max_tr) {
3977 /*
3978 * We need to make sure that the update_max_tr sees that
3979 * current_trace changed to nop_trace to keep it from
3980 * swapping the buffers after we resize it.
3981	 * update_max_tr() is called with interrupts disabled,
3982	 * so a synchronize_sched() is sufficient.
3983 */
3984 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04003985 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003986 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003987#endif
Steven Rostedt (Red Hat)f1b21c92014-01-14 12:33:33 -05003988 /* Currently, only the top instance has options */
3989 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
3990 destroy_trace_option_files(topts);
3991 topts = create_trace_option_files(tr, t);
3992 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003993
3994#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05003995 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04003996 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003997 if (ret < 0)
3998 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003999 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004000#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05004001
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004002 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004003 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004004 if (ret)
4005 goto out;
4006 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004007
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004008 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004009 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05004010 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004011 out:
4012 mutex_unlock(&trace_types_lock);
4013
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004014 return ret;
4015}
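
/*
 * Illustrative only: tracing_set_tracer() is reached by writing a
 * tracer name into the current_tracer file. A hedged userspace
 * sketch; the mount point is an assumption, and "nop" is used since
 * nop_trace is always registered.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int example_select_tracer(const char *name)
{
	int fd = open("/sys/kernel/debug/tracing/current_tracer", O_WRONLY);
	ssize_t w;

	if (fd < 0)
		return -1;
	w = write(fd, name, strlen(name));	/* e.g. "nop" */
	close(fd);
	return w < 0 ? -1 : 0;
}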
4016
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004017static ssize_t
4018tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4019 size_t cnt, loff_t *ppos)
4020{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004021 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004022 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004023 int i;
4024 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004025 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004026
Steven Rostedt60063a62008-10-28 10:44:24 -04004027 ret = cnt;
4028
Li Zefanee6c2c12009-09-18 14:06:47 +08004029 if (cnt > MAX_TRACER_SIZE)
4030 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004031
4032 if (copy_from_user(&buf, ubuf, cnt))
4033 return -EFAULT;
4034
4035 buf[cnt] = 0;
4036
4037 /* strip ending whitespace. */
4038 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4039 buf[i] = 0;
4040
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004041 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004042 if (err)
4043 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004044
Jiri Olsacf8517c2009-10-23 19:36:16 -04004045 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004046
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004047 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004048}
4049
4050static ssize_t
4051tracing_max_lat_read(struct file *filp, char __user *ubuf,
4052 size_t cnt, loff_t *ppos)
4053{
4054 unsigned long *ptr = filp->private_data;
4055 char buf[64];
4056 int r;
4057
Steven Rostedtcffae432008-05-12 21:21:00 +02004058 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004059 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004060 if (r > sizeof(buf))
4061 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004062 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004063}
4064
4065static ssize_t
4066tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4067 size_t cnt, loff_t *ppos)
4068{
Hannes Eder5e398412009-02-10 19:44:34 +01004069 unsigned long *ptr = filp->private_data;
Hannes Eder5e398412009-02-10 19:44:34 +01004070 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004071 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004072
Peter Huewe22fe9b52011-06-07 21:58:27 +02004073 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4074 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004075 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004076
4077 *ptr = val * 1000;
4078
4079 return cnt;
4080}
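
/*
 * A hedged sketch of the unit handling above: these handlers exchange
 * microseconds with userspace while the backing variable holds
 * nanoseconds ("* 1000" on write, nsecs_to_usecs() on read). The file
 * name below is an assumption about where these fops are wired up.
 */
#include <fcntl.h>
#include <unistd.h>

static void example_set_max_latency(void)
{
	int fd = open("/sys/kernel/debug/tracing/tracing_max_latency",
		      O_WRONLY);

	if (fd < 0)
		return;
	write(fd, "100", 3);	/* 100 usecs; 100000 ns stored */
	close(fd);
}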
4081
Steven Rostedtb3806b42008-05-12 21:20:46 +02004082static int tracing_open_pipe(struct inode *inode, struct file *filp)
4083{
Oleg Nesterov15544202013-07-23 17:25:57 +02004084 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004085 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004086 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004087
4088 if (tracing_disabled)
4089 return -ENODEV;
4090
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004091 if (trace_array_get(tr) < 0)
4092 return -ENODEV;
4093
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004094 mutex_lock(&trace_types_lock);
4095
Steven Rostedtb3806b42008-05-12 21:20:46 +02004096 /* create a buffer to store the information to pass to userspace */
4097 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004098 if (!iter) {
4099 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004100 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004101 goto out;
4102 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004103
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004104 /*
4105 * We make a copy of the current tracer to avoid concurrent
4106	 * changes to it while we are reading.
4107 */
4108 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4109 if (!iter->trace) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004110 ret = -ENOMEM;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004111 goto fail;
4112 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004113 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004114
4115 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4116 ret = -ENOMEM;
4117 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304118 }
4119
Steven Rostedta3097202008-11-07 22:36:02 -05004120 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304121 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004122
Steven Rostedt112f38a72009-06-01 15:16:05 -04004123 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4124 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4125
David Sharp8be07092012-11-13 12:18:22 -08004126 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004127 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004128 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4129
Oleg Nesterov15544202013-07-23 17:25:57 +02004130 iter->tr = tr;
4131 iter->trace_buffer = &tr->trace_buffer;
4132 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004133 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004134 filp->private_data = iter;
4135
Steven Rostedt107bad82008-05-12 21:21:01 +02004136 if (iter->trace->pipe_open)
4137 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004138
Arnd Bergmannb4447862010-07-07 23:40:11 +02004139 nonseekable_open(inode, filp);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004140out:
4141 mutex_unlock(&trace_types_lock);
4142 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004143
4144fail:
4145 kfree(iter->trace);
4146 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004147 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004148 mutex_unlock(&trace_types_lock);
4149 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004150}
4151
4152static int tracing_release_pipe(struct inode *inode, struct file *file)
4153{
4154 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02004155 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004156
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004157 mutex_lock(&trace_types_lock);
4158
Steven Rostedt29bf4a52009-12-09 12:37:43 -05004159 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05004160 iter->trace->pipe_close(iter);
4161
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004162 mutex_unlock(&trace_types_lock);
4163
Rusty Russell44623442009-01-01 10:12:23 +10304164 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004165 mutex_destroy(&iter->mutex);
4166 kfree(iter->trace);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004167 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004168
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004169 trace_array_put(tr);
4170
Steven Rostedtb3806b42008-05-12 21:20:46 +02004171 return 0;
4172}
4173
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004174static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004175trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004176{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004177	/* Iterators are static; they should be filled or empty */
4178 if (trace_buffer_iter(iter, iter->cpu_file))
4179 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004180
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004181 if (trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004182 /*
4183 * Always select as readable when in blocking mode
4184 */
4185 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004186 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004187 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004188 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004189}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004190
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004191static unsigned int
4192tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4193{
4194 struct trace_iterator *iter = filp->private_data;
4195
4196 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004197}
4198
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01004199/*
4200	 * This is a makeshift waitqueue.
4201	 * A tracer might use this callback in some rare cases:
4202	 *
4203	 * 1) the current tracer might hold the runqueue lock when it wakes up
4204	 * a reader, hence a deadlock (sched, function, and function graph tracers)
4205	 * 2) the function tracers trace all functions; we don't want
4206	 * the overhead of calling wake_up and friends
4207	 * (and tracing them too)
4208	 *
4209	 * Anyway, this is really a very primitive wakeup.
4210 */
4211void poll_wait_pipe(struct trace_iterator *iter)
4212{
4213 set_current_state(TASK_INTERRUPTIBLE);
4214 /* sleep for 100 msecs, and try again. */
4215 schedule_timeout(HZ / 10);
4216}
4217
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004218/* Must be called with trace_types_lock mutex held. */
4219static int tracing_wait_pipe(struct file *filp)
4220{
4221 struct trace_iterator *iter = filp->private_data;
4222
4223 while (trace_empty(iter)) {
4224
4225 if ((filp->f_flags & O_NONBLOCK)) {
4226 return -EAGAIN;
4227 }
4228
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004229 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004230
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01004231 iter->trace->wait_pipe(iter);
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004232
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004233 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004234
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01004235 if (signal_pending(current))
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004236 return -EINTR;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004237
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004238 /*
Liu Bo250bfd32013-01-14 10:54:11 +08004239 * We block until we read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004240 * We still block if tracing is disabled, but we have never
4241 * read anything. This allows a user to cat this file, and
4242 * then enable tracing. But after we have read something,
4243 * we give an EOF when tracing is again disabled.
4244 *
4245 * iter->pos will be 0 if we haven't read anything.
4246 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04004247 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004248 break;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004249 }
4250
4251 return 1;
4252}
4253
Steven Rostedtb3806b42008-05-12 21:20:46 +02004254/*
4255 * Consumer reader.
4256 */
4257static ssize_t
4258tracing_read_pipe(struct file *filp, char __user *ubuf,
4259 size_t cnt, loff_t *ppos)
4260{
4261 struct trace_iterator *iter = filp->private_data;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004262 struct trace_array *tr = iter->tr;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004263 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004264
4265 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004266 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4267 if (sret != -EBUSY)
4268 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004269
Steven Rostedtf9520752009-03-02 14:04:40 -05004270 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004271
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004272 /* copy the tracer to avoid using a global lock all around */
Steven Rostedt107bad82008-05-12 21:21:01 +02004273 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004274 if (unlikely(iter->trace->name != tr->current_trace->name))
4275 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004276 mutex_unlock(&trace_types_lock);
4277
4278 /*
4279 * Avoid more than one consumer on a single file descriptor
4280	 * This is just a matter of trace coherency; the ring buffer itself
4281 * is protected.
4282 */
4283 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004284 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004285 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4286 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02004287 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02004288 }
4289
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004290waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004291 sret = tracing_wait_pipe(filp);
4292 if (sret <= 0)
4293 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004294
4295 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004296 if (trace_empty(iter)) {
4297 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02004298 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004299 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004300
4301 if (cnt >= PAGE_SIZE)
4302 cnt = PAGE_SIZE - 1;
4303
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004304 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004305 memset(&iter->seq, 0,
4306 sizeof(struct trace_iterator) -
4307 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04004308 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02004309 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004310
Lai Jiangshan4f535962009-05-18 19:35:34 +08004311 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004312 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05004313 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004314 enum print_line_t ret;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004315 int len = iter->seq.len;
4316
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004317 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004318 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02004319 /* don't print partial lines */
4320 iter->seq.len = len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004321 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004322 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01004323 if (ret != TRACE_TYPE_NO_CONSUME)
4324 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004325
4326 if (iter->seq.len >= cnt)
4327 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01004328
4329 /*
4330 * Setting the full flag means we reached the trace_seq buffer
4331 * size and we should leave by partial output condition above.
4332 * One of the trace_seq_* functions is not used properly.
4333 */
4334 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4335 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004336 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004337 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004338 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02004339
Steven Rostedtb3806b42008-05-12 21:20:46 +02004340 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004341 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4342 if (iter->seq.readpos >= iter->seq.len)
Steven Rostedtf9520752009-03-02 14:04:40 -05004343 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004344
4345 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004346 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004347 * entries, go back to wait for more entries.
4348 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004349 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004350 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004351
Steven Rostedt107bad82008-05-12 21:21:01 +02004352out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004353 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004354
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004355 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004356}
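
/*
 * Illustrative only: tracing_read_pipe() above backs the trace_pipe
 * file; reads consume entries and block (unless O_NONBLOCK) until data
 * is available. A hedged userspace sketch; the mount point is an
 * assumption.
 */
#include <fcntl.h>
#include <unistd.h>

static void example_drain_trace_pipe(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);

	if (fd < 0)
		return;
	while ((n = read(fd, buf, sizeof(buf))) > 0)	/* blocks for data */
		write(STDOUT_FILENO, buf, n);	/* entries now consumed */
	close(fd);
}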
4357
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004358static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4359 unsigned int idx)
4360{
4361 __free_page(spd->pages[idx]);
4362}
4363
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08004364static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004365 .can_merge = 0,
4366 .map = generic_pipe_buf_map,
4367 .unmap = generic_pipe_buf_unmap,
4368 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05004369 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004370 .steal = generic_pipe_buf_steal,
4371 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004372};
4373
Steven Rostedt34cd4992009-02-09 12:06:29 -05004374static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004375tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004376{
4377 size_t count;
4378 int ret;
4379
4380 /* Seq buffer is page-sized, exactly what we need. */
4381 for (;;) {
4382 count = iter->seq.len;
4383 ret = print_trace_line(iter);
4384 count = iter->seq.len - count;
4385 if (rem < count) {
4386 rem = 0;
4387 iter->seq.len -= count;
4388 break;
4389 }
4390 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4391 iter->seq.len -= count;
4392 break;
4393 }
4394
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08004395 if (ret != TRACE_TYPE_NO_CONSUME)
4396 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05004397 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05004398 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004399 rem = 0;
4400 iter->ent = NULL;
4401 break;
4402 }
4403 }
4404
4405 return rem;
4406}
4407
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004408static ssize_t tracing_splice_read_pipe(struct file *filp,
4409 loff_t *ppos,
4410 struct pipe_inode_info *pipe,
4411 size_t len,
4412 unsigned int flags)
4413{
Jens Axboe35f3d142010-05-20 10:43:18 +02004414 struct page *pages_def[PIPE_DEF_BUFFERS];
4415 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004416 struct trace_iterator *iter = filp->private_data;
4417 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02004418 .pages = pages_def,
4419 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004420 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02004421 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004422 .flags = flags,
4423 .ops = &tracing_pipe_buf_ops,
4424 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004425 };
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004426 struct trace_array *tr = iter->tr;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004427 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004428 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004429 unsigned int i;
4430
Jens Axboe35f3d142010-05-20 10:43:18 +02004431 if (splice_grow_spd(pipe, &spd))
4432 return -ENOMEM;
4433
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004434 /* copy the tracer to avoid using a global lock all around */
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004435 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004436 if (unlikely(iter->trace->name != tr->current_trace->name))
4437 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004438 mutex_unlock(&trace_types_lock);
4439
4440 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004441
4442 if (iter->trace->splice_read) {
4443 ret = iter->trace->splice_read(iter, filp,
4444 ppos, pipe, len, flags);
4445 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004446 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004447 }
4448
4449 ret = tracing_wait_pipe(filp);
4450 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004451 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004452
Jason Wessel955b61e2010-08-05 09:22:23 -05004453 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004454 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004455 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004456 }
4457
Lai Jiangshan4f535962009-05-18 19:35:34 +08004458 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004459 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004460
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004461 /* Fill as many pages as possible. */
Jens Axboe35f3d142010-05-20 10:43:18 +02004462 for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
4463 spd.pages[i] = alloc_page(GFP_KERNEL);
4464 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05004465 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004466
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004467 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004468
4469 /* Copy the data into the page, so we can start over. */
4470 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02004471 page_address(spd.pages[i]),
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004472 iter->seq.len);
4473 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004474 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004475 break;
4476 }
Jens Axboe35f3d142010-05-20 10:43:18 +02004477 spd.partial[i].offset = 0;
4478 spd.partial[i].len = iter->seq.len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004479
Steven Rostedtf9520752009-03-02 14:04:40 -05004480 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004481 }
4482
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004483 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004484 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004485 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004486
4487 spd.nr_pages = i;
4488
Jens Axboe35f3d142010-05-20 10:43:18 +02004489 ret = splice_to_pipe(pipe, &spd);
4490out:
Eric Dumazet047fe362012-06-12 15:24:40 +02004491 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02004492 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004493
Steven Rostedt34cd4992009-02-09 12:06:29 -05004494out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004495 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02004496 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004497}
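
/*
 * Illustrative only: the splice path above moves trace data to a pipe
 * without copying it through userspace. A hedged sketch, assuming a
 * pipe fd obtained from pipe(2) and the usual mount point.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static void example_splice_trace(int pipe_wr_fd)
{
	int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);

	if (fd < 0)
		return;
	/* move up to 64 KiB of trace data into the pipe */
	splice(fd, NULL, pipe_wr_fd, NULL, 65536, SPLICE_F_MOVE);
	close(fd);
}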
4498
Steven Rostedta98a3c32008-05-12 21:20:59 +02004499static ssize_t
4500tracing_entries_read(struct file *filp, char __user *ubuf,
4501 size_t cnt, loff_t *ppos)
4502{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004503 struct inode *inode = file_inode(filp);
4504 struct trace_array *tr = inode->i_private;
4505 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004506 char buf[64];
4507 int r = 0;
4508 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004509
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004510 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004511
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004512 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004513 int cpu, buf_size_same;
4514 unsigned long size;
4515
4516 size = 0;
4517 buf_size_same = 1;
4518 /* check if all cpu sizes are same */
4519 for_each_tracing_cpu(cpu) {
4520 /* fill in the size from first enabled cpu */
4521 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004522 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4523 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004524 buf_size_same = 0;
4525 break;
4526 }
4527 }
4528
4529 if (buf_size_same) {
4530 if (!ring_buffer_expanded)
4531 r = sprintf(buf, "%lu (expanded: %lu)\n",
4532 size >> 10,
4533 trace_buf_size >> 10);
4534 else
4535 r = sprintf(buf, "%lu\n", size >> 10);
4536 } else
4537 r = sprintf(buf, "X\n");
4538 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004539 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004540
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004541 mutex_unlock(&trace_types_lock);
4542
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004543 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4544 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004545}
4546
4547static ssize_t
4548tracing_entries_write(struct file *filp, const char __user *ubuf,
4549 size_t cnt, loff_t *ppos)
4550{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004551 struct inode *inode = file_inode(filp);
4552 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004553 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004554 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004555
Peter Huewe22fe9b52011-06-07 21:58:27 +02004556 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4557 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004558 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004559
4560 /* must have at least 1 entry */
4561 if (!val)
4562 return -EINVAL;
4563
Steven Rostedt1696b2b2008-11-13 00:09:35 -05004564 /* value is in KB */
4565 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004566 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004567 if (ret < 0)
4568 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004569
Jiri Olsacf8517c2009-10-23 19:36:16 -04004570 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004571
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004572 return cnt;
4573}
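
/*
 * Illustrative only: tracing_entries_write() backs the buffer_size_kb
 * files; the value is interpreted in KiB (note the "val <<= 10" above)
 * and applies per cpu. A hedged sketch; the mount point is an
 * assumption.
 */
#include <fcntl.h>
#include <unistd.h>

static void example_resize_ring_buffer(void)
{
	int fd = open("/sys/kernel/debug/tracing/buffer_size_kb", O_WRONLY);

	if (fd < 0)
		return;
	write(fd, "1024", 4);	/* 1 MiB per cpu */
	close(fd);
}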
Steven Rostedtbf5e6512008-11-10 21:46:00 -05004574
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004575static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004576tracing_total_entries_read(struct file *filp, char __user *ubuf,
4577 size_t cnt, loff_t *ppos)
4578{
4579 struct trace_array *tr = filp->private_data;
4580 char buf[64];
4581 int r, cpu;
4582 unsigned long size = 0, expanded_size = 0;
4583
4584 mutex_lock(&trace_types_lock);
4585 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004586 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004587 if (!ring_buffer_expanded)
4588 expanded_size += trace_buf_size >> 10;
4589 }
4590 if (ring_buffer_expanded)
4591 r = sprintf(buf, "%lu\n", size);
4592 else
4593 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4594 mutex_unlock(&trace_types_lock);
4595
4596 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4597}
4598
4599static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004600tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4601 size_t cnt, loff_t *ppos)
4602{
4603 /*
4604	 * There is no need to read what the user has written; this function
4605	 * exists just to make sure that there is no error when "echo" is used
4606 */
4607
4608 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004609
4610 return cnt;
4611}
4612
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004613static int
4614tracing_free_buffer_release(struct inode *inode, struct file *filp)
4615{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004616 struct trace_array *tr = inode->i_private;
4617
Steven Rostedtcf30cf62011-06-14 22:44:07 -04004618 /* disable tracing ? */
4619 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07004620 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004621 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004622 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004623
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004624 trace_array_put(tr);
4625
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004626 return 0;
4627}
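
/*
 * Illustrative only: per the release handler above, it is the close of
 * the free_buffer file that shrinks the ring buffer to zero (and, with
 * TRACE_ITER_STOP_ON_FREE set, stops tracing first). A hedged sketch;
 * the mount point is an assumption.
 */
#include <fcntl.h>
#include <unistd.h>

static void example_free_trace_buffers(void)
{
	int fd = open("/sys/kernel/debug/tracing/free_buffer", O_WRONLY);

	if (fd < 0)
		return;
	write(fd, "1", 1);	/* content is ignored, see the write handler */
	close(fd);		/* the resize to 0 happens here */
}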
4628
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004629static ssize_t
4630tracing_mark_write(struct file *filp, const char __user *ubuf,
4631 size_t cnt, loff_t *fpos)
4632{
Steven Rostedtd696b582011-09-22 11:50:27 -04004633 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07004634 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04004635 struct ring_buffer_event *event;
4636 struct ring_buffer *buffer;
4637 struct print_entry *entry;
4638 unsigned long irq_flags;
4639 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004640 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04004641 int nr_pages = 1;
4642 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04004643 int offset;
4644 int size;
4645 int len;
4646 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004647 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004648
Steven Rostedtc76f0692008-11-07 22:36:02 -05004649 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004650 return -EINVAL;
4651
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07004652 if (!(trace_flags & TRACE_ITER_MARKERS))
4653 return -EINVAL;
4654
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004655 if (cnt > TRACE_BUF_SIZE)
4656 cnt = TRACE_BUF_SIZE;
4657
Steven Rostedtd696b582011-09-22 11:50:27 -04004658 /*
4659 * Userspace is injecting traces into the kernel trace buffer.
4660	 * We want to be as non-intrusive as possible.
4661 * To do so, we do not want to allocate any special buffers
4662 * or take any locks, but instead write the userspace data
4663 * straight into the ring buffer.
4664 *
4665 * First we need to pin the userspace buffer into memory,
4666	 * which it most likely already is, because userspace just referenced it.
4667 * But there's no guarantee that it is. By using get_user_pages_fast()
4668 * and kmap_atomic/kunmap_atomic() we can get access to the
4669 * pages directly. We then write the data directly into the
4670 * ring buffer.
4671 */
4672 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004673
Steven Rostedtd696b582011-09-22 11:50:27 -04004674 /* check if we cross pages */
4675 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4676 nr_pages = 2;
4677
4678 offset = addr & (PAGE_SIZE - 1);
4679 addr &= PAGE_MASK;
4680
4681 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4682 if (ret < nr_pages) {
4683 while (--ret >= 0)
4684 put_page(pages[ret]);
4685 written = -EFAULT;
4686 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004687 }
4688
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004689 for (i = 0; i < nr_pages; i++)
4690 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04004691
4692 local_save_flags(irq_flags);
4693 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07004694 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04004695 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4696 irq_flags, preempt_count());
4697 if (!event) {
4698 /* Ring buffer disabled, return as if not open for write */
4699 written = -EBADF;
4700 goto out_unlock;
4701 }
4702
4703 entry = ring_buffer_event_data(event);
4704 entry->ip = _THIS_IP_;
4705
4706 if (nr_pages == 2) {
4707 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004708 memcpy(&entry->buf, map_page[0] + offset, len);
4709 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04004710 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004711 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04004712
4713 if (entry->buf[cnt - 1] != '\n') {
4714 entry->buf[cnt] = '\n';
4715 entry->buf[cnt + 1] = '\0';
4716 } else
4717 entry->buf[cnt] = '\0';
4718
Steven Rostedt7ffbd482012-10-11 12:14:25 -04004719 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04004720
4721 written = cnt;
4722
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004723 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004724
Steven Rostedtd696b582011-09-22 11:50:27 -04004725 out_unlock:
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004726 for (i = 0; i < nr_pages; i++){
4727 kunmap_atomic(map_page[i]);
4728 put_page(pages[i]);
4729 }
Steven Rostedtd696b582011-09-22 11:50:27 -04004730 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004731 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004732}
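
/*
 * Illustrative only: the page-pinning path above serves the
 * trace_marker file. A hedged userspace sketch injecting a message
 * into the ring buffer; the mount point is an assumption.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static void example_trace_mark(const char *msg)
{
	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

	if (fd < 0)
		return;
	write(fd, msg, strlen(msg));	/* lands as a TRACE_PRINT entry */
	close(fd);
}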
4733
Li Zefan13f16d22009-12-08 11:16:11 +08004734static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08004735{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004736 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004737 int i;
4738
4739 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08004740 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08004741 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004742 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4743 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08004744 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08004745
Li Zefan13f16d22009-12-08 11:16:11 +08004746 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08004747}
4748
4749static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4750 size_t cnt, loff_t *fpos)
4751{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004752 struct seq_file *m = filp->private_data;
4753 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004754 char buf[64];
4755 const char *clockstr;
4756 int i;
4757
4758 if (cnt >= sizeof(buf))
4759 return -EINVAL;
4760
4761 if (copy_from_user(&buf, ubuf, cnt))
4762 return -EFAULT;
4763
4764 buf[cnt] = 0;
4765
4766 clockstr = strstrip(buf);
4767
4768 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4769 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4770 break;
4771 }
4772 if (i == ARRAY_SIZE(trace_clocks))
4773 return -EINVAL;
4774
Zhaolei5079f322009-08-25 16:12:56 +08004775 mutex_lock(&trace_types_lock);
4776
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004777 tr->clock_id = i;
4778
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004779 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08004780
David Sharp60303ed2012-10-11 16:27:52 -07004781 /*
4782 * New clock may not be consistent with the previous clock.
4783 * Reset the buffer so that it doesn't have incomparable timestamps.
4784 */
Alexander Z Lam94571582013-08-02 18:36:16 -07004785 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004786
4787#ifdef CONFIG_TRACER_MAX_TRACE
4788 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4789 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07004790 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004791#endif
David Sharp60303ed2012-10-11 16:27:52 -07004792
Zhaolei5079f322009-08-25 16:12:56 +08004793 mutex_unlock(&trace_types_lock);
4794
4795 *fpos += cnt;
4796
4797 return cnt;
4798}
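
/*
 * Illustrative only: reading trace_clock lists the available clocks
 * with the active one bracketed (tracing_clock_show() above); writing
 * a name switches clocks and, as noted, resets the buffers. A hedged
 * sketch; the mount point is an assumption, and "global" is one of
 * the trace_clocks[] names.
 */
#include <fcntl.h>
#include <unistd.h>

static void example_switch_trace_clock(void)
{
	int fd = open("/sys/kernel/debug/tracing/trace_clock", O_WRONLY);

	if (fd < 0)
		return;
	write(fd, "global", 6);	/* buffers are reset on a clock switch */
	close(fd);
}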

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
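
/*
 * Illustrative sketch (not in the original source): the value written to
 * the "snapshot" file selects the case handled by the switch above:
 *
 *	# echo 1 > snapshot	allocate (if needed) and take a snapshot
 *	# echo 0 > snapshot	free the snapshot buffer
 *	# echo 2 > snapshot	clear the snapshot contents (the default case)
 *
 * The per-cpu snapshot files behave the same way, but swap only that
 * CPU's buffer (when CONFIG_RING_BUFFER_ALLOW_SWAP permits it).
 */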

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}
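
/*
 * Note (an assumption, for illustration): snapshot_raw_open() backs the
 * per-cpu "snapshot_raw" files, which expose the snapshot buffer in the
 * same page-sized binary format as "trace_pipe_raw", e.g.:
 *
 *	# dd if=per_cpu/cpu0/snapshot_raw of=snap.bin bs=4096
 */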

#endif /* CONFIG_TRACER_SNAPSHOT */


static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr		= tr;
	info->iter.cpu_file	= tracing_get_cpu(inode);
	info->iter.trace	= tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare		= NULL;
	/* Force reading ring buffer for first read */
	info->read		= (unsigned int)-1;

	filp->private_data = info;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		size = -EBUSY;
		goto out_unlock;
	}
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	size = -ENOMEM;
	if (!info->spare)
		goto out_unlock;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK)) {
				size = -EAGAIN;
				goto out_unlock;
			}
			mutex_unlock(&trace_types_lock);
			iter->trace->wait_pipe(iter);
			mutex_lock(&trace_types_lock);
			if (signal_pending(current)) {
				size = -EINTR;
				goto out_unlock;
			}
			goto again;
		}
		size = 0;
		goto out_unlock;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size) {
		size = -EFAULT;
		goto out_unlock;
	}
	size -= ret;

	*ppos += size;
	info->read += size;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.map			= generic_pipe_buf_map,
	.unmap			= generic_pipe_buf_unmap,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}
#endif

	if (splice_grow_spd(pipe, &spd)) {
		ret = -ENOMEM;
		goto out;
	}

	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * Zero out any leftover data; this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&trace_types_lock);
		iter->trace->wait_pipe(iter);
		mutex_lock(&trace_types_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
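
/*
 * Illustrative user-space sketch (not from the original source): the raw
 * per-cpu buffer exposed by these fops is consumed a page at a time,
 * either with plain reads:
 *
 *	int fd = open("per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
 *	char page[4096];
 *	ssize_t n = read(fd, page, sizeof(page));
 *
 * or zero-copy via splice() into a pipe, which is what
 * tracing_buffers_splice_read() above implements.
 */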

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
								t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
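
/*
 * Illustrative output of the per-cpu "stats" file (the format follows the
 * trace_seq_printf() calls above; the numbers are made up):
 *
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 5208
 *	oldest event ts: 1576.449118
 *	now ts: 1576.461508
 *	dropped events: 0
 *	read events: 129
 */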

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "snapshot");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};

static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}
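
/*
 * Illustrative usage of the "snapshot" function command parsed above
 * (not part of the original source):
 *
 *	# echo 'schedule:snapshot:1' > set_ftrace_filter
 *	# echo '!schedule:snapshot' > set_ftrace_filter
 *
 * The first line takes one snapshot the next time schedule() is hit; the
 * optional ":count" is what ends up in the probe's data field, and the
 * '!' form unregisters the probe.
 */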

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	if (tr->dir)
		return tr->dir;

	if (!debugfs_initialized())
		return NULL;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir)
		pr_warn_once("Could not create debugfs directory 'tracing'\n");

	return tr->dir;
}

struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		ret->d_inode->i_cdev = (void *)(cpu + 1);
	return ret;
}
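
/*
 * Sketch of the inverse mapping (an assumption mirroring the "+ 1"
 * encoding above; the real tracing_get_cpu() is defined earlier in this
 * file):
 *
 *	cpu = inode->i_cdev ? (long)inode->i_cdev - 1 : RING_BUFFER_ALL_CPUS;
 */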

static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}


static const struct file_operations trace_options_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_read,
	.write		= trace_options_write,
	.llseek		= generic_file_llseek,
};

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_core_read,
	.write		= trace_options_core_write,
	.llseek		= generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}


static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);

}

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}
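
/*
 * Illustrative usage (not from the original source): the files created
 * here show up under "options/" and toggle tracer-specific flags, e.g.
 * for the function tracer (assuming it exposes func_stack_trace):
 *
 *	# echo function > current_tracer
 *	# echo 1 > options/func_stack_trace
 */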

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++) {
		if (topts[cnt].entry)
			debugfs_remove(topts[cnt].entry);
	}

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
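
/*
 * Illustrative usage (not part of the original source): these fops back
 * the "tracing_on" file, which toggles recording into the ring buffer
 * without tearing down the current tracer:
 *
 *	# echo 0 > tracing_on	stop recording (buffer contents are kept)
 *	# echo 1 > tracing_on	resume recording
 */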

struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	if (tr->trace_buffer.buffer)
		ring_buffer_free(tr->trace_buffer.buffer);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
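
/*
 * new_instance_create() is what runs when user space makes a directory
 * under the instances directory, e.g. (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	mkdir /sys/kernel/debug/tracing/instances/foo
 *
 * The new instance gets its own ring buffer, event directories and
 * control files via allocate_trace_buffers(), event_trace_add_tracer()
 * and init_tracer_debugfs() above.
 */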

static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref)
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_percpu(tr->trace_buffer.data);
	ring_buffer_free(tr->trace_buffer.buffer);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
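
/*
 * instance_delete() is the rmdir counterpart:
 *
 *	rmdir /sys/kernel/debug/tracing/instances/foo
 *
 * It refuses with -EBUSY while tr->ref is nonzero, i.e. while one of
 * the instance's files is still open, so an instance cannot be torn
 * down underneath a reader.
 */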

static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_remove_recursive() will
	 * also take the mutex. As the instances directory can not be
	 * destroyed or changed in any other way, it is safe to unlock it,
	 * and let the dentry try. If two users try to remove the same dir
	 * at the same time, then instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}
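
/*
 * Note the re-lock order above: the parent ("instances") i_mutex is
 * re-acquired with I_MUTEX_PARENT nesting before the victim dentry's
 * own i_mutex, restoring the parent -> child order the VFS held when
 * it called ->rmdir(), so lockdep's nesting annotations stay
 * consistent.
 */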

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}
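
/*
 * Each file above is a plain debugfs file, so an instance is driven
 * with ordinary reads and writes from user space, e.g. (from inside
 * the tracing directory):
 *
 *	echo function > current_tracer
 *	echo 1 > tracing_on
 *	cat trace_pipe
 *
 * The same files are created for every instance, so each trace_array
 * is controlled independently of the others.
 */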

static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tracing_max_latency, &tracing_max_lat_fops);
#endif

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &tracing_thresh, &tracing_max_lat_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.next		= NULL,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
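
/*
 * Both notifiers fire only when ftrace_dump_on_oops is set. That is
 * normally done on the kernel command line: "ftrace_dump_on_oops"
 * dumps every CPU's buffer after an oops or panic, and
 * "ftrace_dump_on_oops=orig_cpu" (the documented spelling, assumed
 * here) limits the dump to the CPU that triggered it. The value can
 * also be flipped at run time via /proc/sys/kernel/ftrace_dump_on_oops.
 */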

/*
 * printk is set to a max of 1024; we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* Should be zero terminated, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}
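
/*
 * Deliberately not static: besides ftrace_dump() below, this helper is
 * meant for in-kernel debugger code (the kdb "ftdump" command, where
 * configured) to set up an iterator over the global buffer without
 * going through the debugfs files.
 */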

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("# MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We print all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE " (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
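
/*
 * ftrace_dump() is exported so any kernel code can dump the trace
 * buffer to the console in an emergency, e.g.:
 *
 *	ftrace_dump(DUMP_ALL);		dump every CPU's buffer
 *	ftrace_dump(DUMP_ORIG);		dump only the calling CPU
 *
 * The sysrq-z handler and the panic/die notifiers above funnel into
 * the same path when ftrace_dump_on_oops is set.
 */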

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_cpumask;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	trace_init_cmdlines();

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_cpumask:
	free_percpu(global_trace.trace_buffer.data);
#ifdef CONFIG_TRACER_MAX_TRACE
	free_percpu(global_trace.max_buffer.data);
#endif
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
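
/*
 * The sizing above pairs with boot parameters: "trace_buf_size=nn[KMG]"
 * sets the size the per-CPU buffers grow to once tracing is used (they
 * start at the one-page minimum until then), and the trace_boot_options
 * loop consumes a comma separated "trace_options=" string from the
 * command line. Both parameter spellings are taken from the kernel
 * documentation rather than from this file.
 */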

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name lives in an init section.
	 * This function is called as a late initcall; if we did not
	 * find the boot tracer by then, clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);
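
/*
 * The initcall levels are deliberate: tracer_alloc_buffers() runs at
 * early_initcall time so the ring buffer exists before most of the
 * kernel boots, tracer_init_debugfs() waits for fs_initcall so that
 * debugfs is available, and clear_boot_tracer() runs as a late
 * initcall, after every built-in tracer has had its chance to
 * register against the bootup tracer name.
 */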