/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
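
/*
 * Illustrative boot usage, following the parser above:
 *
 *   ftrace_dump_on_oops           dump the buffers of all CPUs on oops
 *   ftrace_dump_on_oops=orig_cpu  dump only the buffer of the CPU that
 *                                 triggered the oops
 */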

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);
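
/*
 * Illustrative boot usage: the saved string is applied during startup,
 * so it takes comma-separated names from the trace_options[] table
 * below, with an optional "no" prefix to clear an option, e.g.:
 *
 *   trace_options=stacktrace,noprint-parent
 */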

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * not to have to wait for all that output. Anyway, this can be
 * configured at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:    The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
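
/*
 * Note: callers normally reach this through the trace_puts() macro
 * (see linux/kernel.h), which resolves to __trace_bputs() for
 * compile-time constant strings and to __trace_puts() otherwise.
 */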

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:    The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot, either with
 * tracing_snapshot_alloc() or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
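
/*
 * Illustrative tracefs usage of the snapshot feature documented above
 * (assuming the usual mount point):
 *
 *   echo 1 > /sys/kernel/debug/tracing/snapshot   allocate and snapshot
 *   cat /sys/kernel/debug/tracing/snapshot        read the snapshot
 *   echo 0 > /sys/kernel/debug/tracing/snapshot   free the buffer
 */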

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
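
/*
 * Example: memparse() above accepts an optional K/M/G suffix, so a
 * one-megabyte per-cpu buffer can be requested at boot with either of:
 *
 *   trace_buf_size=1048576
 *   trace_buf_size=1M
 */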

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
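
/*
 * Example: the value is given in microseconds and stored in
 * nanoseconds (note the multiplication by 1000 above), so
 *
 *   tracing_thresh=100
 *
 * makes the latency tracers record only latencies above 100 usecs.
 */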

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;	/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	ARCH_TRACE_CLOCKS
};
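
/*
 * The timestamp clock can be switched among the names above at run
 * time by writing to the trace_clock tracefs file, e.g. (assuming the
 * usual mount point):
 *
 *   echo global > /sys/kernel/debug/tracing/trace_clock
 */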

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
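
/*
 * trace_get_user() is the common write-side tokenizer for tracefs
 * control files; the set_ftrace_filter write path, for example, uses
 * it to split user input into individual function names.
 */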

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break them. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
1242
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001243void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001244{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001245 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001246
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001247 if (!buffer)
1248 return;
1249
Steven Rostedtf6339032009-09-04 12:35:16 -04001250 ring_buffer_record_disable(buffer);
1251
1252 /* Make sure all commits have finished */
1253 synchronize_sched();
Steven Rostedt68179682012-05-08 20:57:53 -04001254 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001255
1256 ring_buffer_record_enable(buffer);
1257}
1258
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001259void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001260{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001261 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001262 int cpu;
1263
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001264 if (!buffer)
1265 return;
1266
Steven Rostedt621968c2009-09-04 12:02:35 -04001267 ring_buffer_record_disable(buffer);
1268
1269 /* Make sure all commits have finished */
1270 synchronize_sched();
1271
Alexander Z Lam94571582013-08-02 18:36:16 -07001272 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001273
1274 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001275 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001276
1277 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001278}
1279
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001280/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001281void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001282{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001283 struct trace_array *tr;
1284
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001285 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001286 tracing_reset_online_cpus(&tr->trace_buffer);
1287#ifdef CONFIG_TRACER_MAX_TRACE
1288 tracing_reset_online_cpus(&tr->max_buffer);
1289#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001290 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001291}
1292
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001293#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001294#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001295static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001296struct saved_cmdlines_buffer {
1297 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1298 unsigned *map_cmdline_to_pid;
1299 unsigned cmdline_num;
1300 int cmdline_idx;
1301 char *saved_cmdlines;
1302};
1303static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001304
Steven Rostedt25b0b442008-05-12 21:21:00 +02001305/* temporary disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001306static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001307
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001308static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001309{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001310 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1311}
1312
1313static inline void set_cmdline(int idx, const char *cmdline)
1314{
1315 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1316}
1317
1318static int allocate_cmdlines_buffer(unsigned int val,
1319 struct saved_cmdlines_buffer *s)
1320{
1321 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1322 GFP_KERNEL);
1323 if (!s->map_cmdline_to_pid)
1324 return -ENOMEM;
1325
1326 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1327 if (!s->saved_cmdlines) {
1328 kfree(s->map_cmdline_to_pid);
1329 return -ENOMEM;
1330 }
1331
1332 s->cmdline_idx = 0;
1333 s->cmdline_num = val;
1334 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1335 sizeof(s->map_pid_to_cmdline));
1336 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1337 val * sizeof(*s->map_cmdline_to_pid));
1338
1339 return 0;
1340}
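/*
 * Sketch of the matching teardown (hypothetical helper; nothing in this
 * excerpt frees the buffer): release the two arrays that
 * allocate_cmdlines_buffer() kmalloc'ed, in reverse order, then the
 * descriptor its caller allocated.
 */
static void example_free_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}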
1341
1342static int trace_create_savedcmd(void)
1343{
1344 int ret;
1345
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001346 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001347 if (!savedcmd)
1348 return -ENOMEM;
1349
1350 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1351 if (ret < 0) {
1352 kfree(savedcmd);
1353 savedcmd = NULL;
1354 return -ENOMEM;
1355 }
1356
1357 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001358}
1359
Carsten Emdeb5130b12009-09-13 01:43:07 +02001360int is_tracing_stopped(void)
1361{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001362 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001363}
1364
Steven Rostedt0f048702008-11-05 16:05:44 -05001365/**
1366 * tracing_start - quick start of the tracer
1367 *
1368 * If tracing is enabled but was stopped by tracing_stop,
1369 * this will start the tracer back up.
1370 */
1371void tracing_start(void)
1372{
1373 struct ring_buffer *buffer;
1374 unsigned long flags;
1375
1376 if (tracing_disabled)
1377 return;
1378
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001379 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1380 if (--global_trace.stop_count) {
1381 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001382 /* Someone screwed up their debugging */
1383 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001384 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001385 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001386 goto out;
1387 }
1388
Steven Rostedta2f80712010-03-12 19:56:00 -05001389 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001390 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001391
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001392 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001393 if (buffer)
1394 ring_buffer_record_enable(buffer);
1395
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001396#ifdef CONFIG_TRACER_MAX_TRACE
1397 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001398 if (buffer)
1399 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001400#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001401
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001402 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001403
Steven Rostedt0f048702008-11-05 16:05:44 -05001404 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001405 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1406}
1407
1408static void tracing_start_tr(struct trace_array *tr)
1409{
1410 struct ring_buffer *buffer;
1411 unsigned long flags;
1412
1413 if (tracing_disabled)
1414 return;
1415
1416 /* If global, we need to also start the max tracer */
1417 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1418 return tracing_start();
1419
1420 raw_spin_lock_irqsave(&tr->start_lock, flags);
1421
1422 if (--tr->stop_count) {
1423 if (tr->stop_count < 0) {
1424 /* Someone screwed up their debugging */
1425 WARN_ON_ONCE(1);
1426 tr->stop_count = 0;
1427 }
1428 goto out;
1429 }
1430
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001431 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001432 if (buffer)
1433 ring_buffer_record_enable(buffer);
1434
1435 out:
1436 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001437}
1438
1439/**
1440 * tracing_stop - quick stop of the tracer
1441 *
1442 * Lightweight way to stop tracing. Use in conjunction with
1443 * tracing_start.
1444 */
1445void tracing_stop(void)
1446{
1447 struct ring_buffer *buffer;
1448 unsigned long flags;
1449
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001450 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1451 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001452 goto out;
1453
Steven Rostedta2f80712010-03-12 19:56:00 -05001454 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001455 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001456
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001457 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001458 if (buffer)
1459 ring_buffer_record_disable(buffer);
1460
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001461#ifdef CONFIG_TRACER_MAX_TRACE
1462 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001463 if (buffer)
1464 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001465#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001466
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001467 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001468
Steven Rostedt0f048702008-11-05 16:05:44 -05001469 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001470 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1471}
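/*
 * Usage sketch: tracing_stop()/tracing_start() nest via stop_count, so
 * independent users can each bracket a critical section without
 * re-enabling recording out from under one another.
 */
static void example_nested_stop_start(void)
{
	tracing_stop();		/* stop_count 0 -> 1, recording off */
	tracing_stop();		/* stop_count 1 -> 2, still off */
	tracing_start();	/* stop_count 2 -> 1, still off */
	tracing_start();	/* stop_count 1 -> 0, recording back on */
}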
1472
1473static void tracing_stop_tr(struct trace_array *tr)
1474{
1475 struct ring_buffer *buffer;
1476 unsigned long flags;
1477
1478 /* If global, we need to also stop the max tracer */
1479 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1480 return tracing_stop();
1481
1482 raw_spin_lock_irqsave(&tr->start_lock, flags);
1483 if (tr->stop_count++)
1484 goto out;
1485
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001486 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001487 if (buffer)
1488 ring_buffer_record_disable(buffer);
1489
1490 out:
1491 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001492}
1493
Ingo Molnare309b412008-05-12 21:20:51 +02001494void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001495
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001496static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001497{
Carsten Emdea635cf02009-03-18 09:00:41 +01001498 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001499
1500 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001501 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001502
1503 /*
1504 * It's not the end of the world if we don't get
1505 * the lock, but we also don't want to spin
1506 * nor do we want to disable interrupts,
1507 * so if we miss here, then better luck next time.
1508 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001509 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001510 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001511
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001512 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001513 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001514 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001515
Carsten Emdea635cf02009-03-18 09:00:41 +01001516 /*
1517 * Check whether the cmdline buffer at idx has a pid
1518 * mapped. We are going to overwrite that entry so we
1519 * need to clear the map_pid_to_cmdline. Otherwise we
1520 * would read the new comm for the old pid.
1521 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001522 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001523 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001524 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001525
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001526 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1527 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001528
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001529 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001530 }
1531
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001532 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001533
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001534 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001535
1536 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001537}
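/*
 * Worked example of the eviction above: once all cmdline_num slots are in
 * use, the next new pid reuses the oldest slot. If that slot still maps an
 * old pid P, map_pid_to_cmdline[P] is cleared to NO_CMDLINE_MAP first, so
 * a later lookup of P falls back to "<...>" instead of reporting the new
 * task's comm.
 */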
1538
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001539static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001540{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001541 unsigned map;
1542
Steven Rostedt4ca530852009-03-16 19:20:15 -04001543 if (!pid) {
1544 strcpy(comm, "<idle>");
1545 return;
1546 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001547
Steven Rostedt74bf4072010-01-25 15:11:53 -05001548 if (WARN_ON_ONCE(pid < 0)) {
1549 strcpy(comm, "<XXX>");
1550 return;
1551 }
1552
Steven Rostedt4ca530852009-03-16 19:20:15 -04001553 if (pid > PID_MAX_DEFAULT) {
1554 strcpy(comm, "<...>");
1555 return;
1556 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001557
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001558 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001559 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001560 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001561 else
1562 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001563}
1564
1565void trace_find_cmdline(int pid, char comm[])
1566{
1567 preempt_disable();
1568 arch_spin_lock(&trace_cmdline_lock);
1569
1570 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001571
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001572 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001573 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001574}
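/*
 * Usage sketch: resolving a recorded pid back to a command name when
 * formatting output. The destination must hold TASK_COMM_LEN bytes;
 * unknown pids come back as "<...>".
 */
static void example_print_comm(int pid)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(pid, comm);
	pr_info("pid %d ran %s\n", pid, comm);
}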
1575
Ingo Molnare309b412008-05-12 21:20:51 +02001576void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001577{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001578 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001579 return;
1580
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001581 if (!__this_cpu_read(trace_cmdline_save))
1582 return;
1583
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001584 if (trace_save_cmdline(tsk))
1585 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001586}
1587
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001588void
Steven Rostedt38697052008-10-01 13:14:09 -04001589tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1590 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001591{
1592 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001593
Steven Rostedt777e2082008-09-29 23:02:42 -04001594 entry->preempt_count = pc & 0xff;
1595 entry->pid = (tsk) ? tsk->pid : 0;
1596 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001597#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001598 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001599#else
1600 TRACE_FLAG_IRQS_NOSUPPORT |
1601#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001602 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1603 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001604 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1605 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001606}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001607EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
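/*
 * Sketch (hypothetical helper): decoding the context bits packed above.
 * The TRACE_FLAG_* values are the same ones tested in this file; the
 * preempt depth lives in the separate preempt_count byte.
 */
static void example_decode_flags(const struct trace_entry *entry)
{
	if (entry->flags & TRACE_FLAG_IRQS_OFF)
		pr_debug("irqs off\n");
	if (entry->flags & TRACE_FLAG_HARDIRQ)
		pr_debug("hardirq context\n");
	if (entry->flags & TRACE_FLAG_SOFTIRQ)
		pr_debug("softirq context\n");
	pr_debug("preempt depth %d\n", entry->preempt_count);
}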
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001608
Steven Rostedte77405a2009-09-02 14:17:06 -04001609struct ring_buffer_event *
1610trace_buffer_lock_reserve(struct ring_buffer *buffer,
1611 int type,
1612 unsigned long len,
1613 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001614{
1615 struct ring_buffer_event *event;
1616
Steven Rostedte77405a2009-09-02 14:17:06 -04001617 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001618 if (event != NULL) {
1619 struct trace_entry *ent = ring_buffer_event_data(event);
1620
1621 tracing_generic_entry_update(ent, flags, pc);
1622 ent->type = type;
1623 }
1624
1625 return event;
1626}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001627
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001628void
1629__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1630{
1631 __this_cpu_write(trace_cmdline_save, true);
1632 ring_buffer_unlock_commit(buffer, event);
1633}
1634
Steven Rostedte77405a2009-09-02 14:17:06 -04001635static inline void
1636__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1637 struct ring_buffer_event *event,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001638 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001639{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001640 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001641
Steven Rostedte77405a2009-09-02 14:17:06 -04001642 ftrace_trace_stack(buffer, flags, 6, pc);
1643 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001644}
1645
Steven Rostedte77405a2009-09-02 14:17:06 -04001646void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1647 struct ring_buffer_event *event,
1648 unsigned long flags, int pc)
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001649{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001650 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001651}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001652EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
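/*
 * Usage sketch of the reserve/fill/commit protocol (reusing this file's
 * TRACE_FN entry type; real callers pass their own type and size). A NULL
 * reserve means the ring buffer rejected the event, e.g. recording is
 * disabled, and the caller simply bails out.
 */
static void example_emit_fn_event(struct ring_buffer *buffer,
				  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;
	entry->parent_ip = _RET_IP_;
	trace_buffer_unlock_commit(buffer, event, flags, pc);
}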
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001653
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001654static struct ring_buffer *temp_buffer;
1655
Steven Rostedtef5580d2009-02-27 19:38:04 -05001656struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001657trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1658 struct ftrace_event_file *ftrace_file,
1659 int type, unsigned long len,
1660 unsigned long flags, int pc)
1661{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001662 struct ring_buffer_event *entry;
1663
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001664 *current_rb = ftrace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001665 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001666 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001667 /*
1668	 * If tracing is off, but we have triggers enabled,
1669	 * we still need to look at the event data. Use the temp_buffer
1670	 * to store the trace event for the trigger to use. It's recursion
1671 * safe and will not be recorded anywhere.
1672 */
1673 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1674 *current_rb = temp_buffer;
1675 entry = trace_buffer_lock_reserve(*current_rb,
1676 type, len, flags, pc);
1677 }
1678 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001679}
1680EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1681
1682struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001683trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1684 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001685 unsigned long flags, int pc)
1686{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001687 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001688 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001689 type, len, flags, pc);
1690}
Steven Rostedt94487d62009-05-05 19:22:53 -04001691EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001692
Steven Rostedte77405a2009-09-02 14:17:06 -04001693void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1694 struct ring_buffer_event *event,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001695 unsigned long flags, int pc)
1696{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001697 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001698}
Steven Rostedt94487d62009-05-05 19:22:53 -04001699EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001700
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001701void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1702 struct ring_buffer_event *event,
1703 unsigned long flags, int pc,
1704 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001705{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001706 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001707
1708 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1709 ftrace_trace_userstack(buffer, flags, pc);
1710}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001711EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001712
Steven Rostedte77405a2009-09-02 14:17:06 -04001713void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1714 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001715{
Steven Rostedte77405a2009-09-02 14:17:06 -04001716 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001717}
Steven Rostedt12acd472009-04-17 16:01:56 -04001718EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001719
Ingo Molnare309b412008-05-12 21:20:51 +02001720void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001721trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001722 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1723 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001724{
Tom Zanussie1112b42009-03-31 00:48:49 -05001725 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001726 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001727 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001728 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001729
Steven Rostedtd7690412008-10-01 00:29:53 -04001730 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001731 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001732 return;
1733
Steven Rostedte77405a2009-09-02 14:17:06 -04001734 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001735 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001736 if (!event)
1737 return;
1738 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001739 entry->ip = ip;
1740 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001741
Tom Zanussif306cc82013-10-24 08:34:17 -05001742 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001743 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001744}
1745
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001746#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001747
1748#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1749struct ftrace_stack {
1750 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1751};
1752
1753static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1754static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1755
Steven Rostedte77405a2009-09-02 14:17:06 -04001756static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001757 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001758 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001759{
Tom Zanussie1112b42009-03-31 00:48:49 -05001760 struct ftrace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001761 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001762 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001763 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001764 int use_stack;
1765 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001766
1767 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001768 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001769
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001770 /*
1771 * Since events can happen in NMIs there's no safe way to
1772 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1773 * or NMI comes in, it will just have to use the default
1774	 * FTRACE_STACK_ENTRIES.
1775 */
1776 preempt_disable_notrace();
1777
Shan Wei82146522012-11-19 13:21:01 +08001778 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001779 /*
1780 * We don't need any atomic variables, just a barrier.
1781 * If an interrupt comes in, we don't care, because it would
1782 * have exited and put the counter back to what we want.
1783 * We just need a barrier to keep gcc from moving things
1784 * around.
1785 */
1786 barrier();
1787 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05001788 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001789 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1790
1791 if (regs)
1792 save_stack_trace_regs(regs, &trace);
1793 else
1794 save_stack_trace(&trace);
1795
1796 if (trace.nr_entries > size)
1797 size = trace.nr_entries;
1798 } else
1799 /* From now on, use_stack is a boolean */
1800 use_stack = 0;
1801
1802 size *= sizeof(unsigned long);
1803
1804 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1805 sizeof(*entry) + size, flags, pc);
1806 if (!event)
1807 goto out;
1808 entry = ring_buffer_event_data(event);
1809
1810 memset(&entry->caller, 0, size);
1811
1812 if (use_stack)
1813 memcpy(&entry->caller, trace.entries,
1814 trace.nr_entries * sizeof(unsigned long));
1815 else {
1816 trace.max_entries = FTRACE_STACK_ENTRIES;
1817 trace.entries = entry->caller;
1818 if (regs)
1819 save_stack_trace_regs(regs, &trace);
1820 else
1821 save_stack_trace(&trace);
1822 }
1823
1824 entry->size = trace.nr_entries;
1825
Tom Zanussif306cc82013-10-24 08:34:17 -05001826 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001827 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001828
1829 out:
1830 /* Again, don't let gcc optimize things here */
1831 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001832 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001833 preempt_enable_notrace();
1834
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001835}
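/*
 * Worked example of the reservation above: a process-context stack trace
 * bumps ftrace_stack_reserve to 1 and unwinds into the big per-cpu
 * ftrace_stack scratch area. If an NMI fires mid-unwind and also traces a
 * stack, its __this_cpu_inc_return() sees 2, so it unwinds directly into
 * the ring buffer event with the smaller FTRACE_STACK_ENTRIES bound
 * instead of scribbling over the half-used scratch area.
 */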
1836
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001837void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1838 int skip, int pc, struct pt_regs *regs)
1839{
1840 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1841 return;
1842
1843 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1844}
1845
Steven Rostedte77405a2009-09-02 14:17:06 -04001846void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1847 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001848{
1849 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1850 return;
1851
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001852 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001853}
1854
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001855void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1856 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001857{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001858 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001859}
1860
Steven Rostedt03889382009-12-11 09:48:22 -05001861/**
1862 * trace_dump_stack - record a stack backtrace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001863 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001864 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001865void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001866{
1867 unsigned long flags;
1868
1869 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001870 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001871
1872 local_save_flags(flags);
1873
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001874 /*
1875	 * Skip three more frames; that lands us at the caller
1876	 * of this function.
1877 */
1878 skip += 3;
1879 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1880 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001881}
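/*
 * Usage sketch: recording the current backtrace from a suspect code path.
 * skip == 0 starts the trace at the caller of trace_dump_stack() itself,
 * thanks to the skip += 3 fixup above.
 */
static void example_flag_bad_state(void)
{
	trace_dump_stack(0);
}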
1882
Steven Rostedt91e86e52010-11-10 12:56:12 +01001883static DEFINE_PER_CPU(int, user_stack_count);
1884
Steven Rostedte77405a2009-09-02 14:17:06 -04001885void
1886ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001887{
Tom Zanussie1112b42009-03-31 00:48:49 -05001888 struct ftrace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001889 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001890 struct userstack_entry *entry;
1891 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001892
1893 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1894 return;
1895
Steven Rostedtb6345872010-03-12 20:03:30 -05001896 /*
1897 * NMIs can not handle page faults, even with fix ups.
1898 * The save user stack can (and often does) fault.
1899 */
1900 if (unlikely(in_nmi()))
1901 return;
1902
Steven Rostedt91e86e52010-11-10 12:56:12 +01001903 /*
1904 * prevent recursion, since the user stack tracing may
1905 * trigger other kernel events.
1906 */
1907 preempt_disable();
1908 if (__this_cpu_read(user_stack_count))
1909 goto out;
1910
1911 __this_cpu_inc(user_stack_count);
1912
Steven Rostedte77405a2009-09-02 14:17:06 -04001913 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001914 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001915 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001916 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001917 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001918
Steven Rostedt48659d32009-09-11 11:36:23 -04001919 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001920 memset(&entry->caller, 0, sizeof(entry->caller));
1921
1922 trace.nr_entries = 0;
1923 trace.max_entries = FTRACE_STACK_ENTRIES;
1924 trace.skip = 0;
1925 trace.entries = entry->caller;
1926
1927 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001928 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001929 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001930
Li Zefan1dbd1952010-12-09 15:47:56 +08001931 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001932 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001933 out:
1934 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001935}
1936
Hannes Eder4fd27352009-02-10 19:44:12 +01001937#ifdef UNUSED
1938static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001939{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001940 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001941}
Hannes Eder4fd27352009-02-10 19:44:12 +01001942#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001943
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001944#endif /* CONFIG_STACKTRACE */
1945
Steven Rostedt07d777f2011-09-22 14:01:55 -04001946/* created for use with alloc_percpu */
1947struct trace_buffer_struct {
1948 char buffer[TRACE_BUF_SIZE];
1949};
1950
1951static struct trace_buffer_struct *trace_percpu_buffer;
1952static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1953static struct trace_buffer_struct *trace_percpu_irq_buffer;
1954static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1955
1956/*
1957 * The buffer used depends on the context. There is a per-cpu
1958 * buffer for normal context, softirq context, hard irq context and
1959 * for NMI context. This allows for lockless recording.
1960 *
1961 * Note, if the buffers failed to be allocated, then this returns NULL
1962 */
1963static char *get_trace_buf(void)
1964{
1965 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001966
1967 /*
1968 * If we have allocated per cpu buffers, then we do not
1969 * need to do any locking.
1970 */
1971 if (in_nmi())
1972 percpu_buffer = trace_percpu_nmi_buffer;
1973 else if (in_irq())
1974 percpu_buffer = trace_percpu_irq_buffer;
1975 else if (in_softirq())
1976 percpu_buffer = trace_percpu_sirq_buffer;
1977 else
1978 percpu_buffer = trace_percpu_buffer;
1979
1980 if (!percpu_buffer)
1981 return NULL;
1982
Shan Weid8a03492012-11-13 09:53:04 +08001983 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001984}
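/*
 * Worked example of why four buffers are needed: a trace_printk() in
 * process context formats into trace_percpu_buffer; if an interrupt
 * arrives mid-format and its handler also calls trace_printk(), the
 * nested call lands in trace_percpu_irq_buffer instead of scribbling
 * over the half-written normal-context buffer. Callers run with
 * preemption disabled, so a cpu's buffer is never handed to two tasks.
 */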
1985
1986static int alloc_percpu_trace_buffer(void)
1987{
1988 struct trace_buffer_struct *buffers;
1989 struct trace_buffer_struct *sirq_buffers;
1990 struct trace_buffer_struct *irq_buffers;
1991 struct trace_buffer_struct *nmi_buffers;
1992
1993 buffers = alloc_percpu(struct trace_buffer_struct);
1994 if (!buffers)
1995 goto err_warn;
1996
1997 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1998 if (!sirq_buffers)
1999 goto err_sirq;
2000
2001 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2002 if (!irq_buffers)
2003 goto err_irq;
2004
2005 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2006 if (!nmi_buffers)
2007 goto err_nmi;
2008
2009 trace_percpu_buffer = buffers;
2010 trace_percpu_sirq_buffer = sirq_buffers;
2011 trace_percpu_irq_buffer = irq_buffers;
2012 trace_percpu_nmi_buffer = nmi_buffers;
2013
2014 return 0;
2015
2016 err_nmi:
2017 free_percpu(irq_buffers);
2018 err_irq:
2019 free_percpu(sirq_buffers);
2020 err_sirq:
2021 free_percpu(buffers);
2022 err_warn:
2023 WARN(1, "Could not allocate percpu trace_printk buffer");
2024 return -ENOMEM;
2025}
2026
Steven Rostedt81698832012-10-11 10:15:05 -04002027static int buffers_allocated;
2028
Steven Rostedt07d777f2011-09-22 14:01:55 -04002029void trace_printk_init_buffers(void)
2030{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002031 if (buffers_allocated)
2032 return;
2033
2034 if (alloc_percpu_trace_buffer())
2035 return;
2036
Steven Rostedt2184db42014-05-28 13:14:40 -04002037 /* trace_printk() is for debug use only. Don't use it in production. */
2038
2039 pr_warning("\n**********************************************************\n");
2040 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2041 pr_warning("** **\n");
2042 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2043 pr_warning("** **\n");
2044 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
Frans Klavereff264e2014-11-07 15:53:44 +01002045 pr_warning("** unsafe for production use. **\n");
Steven Rostedt2184db42014-05-28 13:14:40 -04002046 pr_warning("** **\n");
2047 pr_warning("** If you see this message and you are not debugging **\n");
2048 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2049 pr_warning("** **\n");
2050 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2051 pr_warning("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002052
Steven Rostedtb382ede62012-10-10 21:44:34 -04002053 /* Expand the buffers to set size */
2054 tracing_update_buffers();
2055
Steven Rostedt07d777f2011-09-22 14:01:55 -04002056 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002057
2058 /*
2059 * trace_printk_init_buffers() can be called by modules.
2060 * If that happens, then we need to start cmdline recording
2061	 * directly here. If global_trace.trace_buffer.buffer is already
2062	 * allocated, then this was called by module code.
2063 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002064 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002065 tracing_start_cmdline_record();
2066}
2067
2068void trace_printk_start_comm(void)
2069{
2070 /* Start tracing comms if trace printk is set */
2071 if (!buffers_allocated)
2072 return;
2073 tracing_start_cmdline_record();
2074}
2075
2076static void trace_printk_start_stop_comm(int enabled)
2077{
2078 if (!buffers_allocated)
2079 return;
2080
2081 if (enabled)
2082 tracing_start_cmdline_record();
2083 else
2084 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002085}
2086
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002087/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002088 * trace_vbprintk - write a binary message to the tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002089 *
2090 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002091int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002092{
Tom Zanussie1112b42009-03-31 00:48:49 -05002093 struct ftrace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002094 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002095 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002096 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002097 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002098 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002099 char *tbuffer;
2100 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002101
2102 if (unlikely(tracing_selftest_running || tracing_disabled))
2103 return 0;
2104
2105 /* Don't pollute graph traces with trace_vprintk internals */
2106 pause_graph_tracing();
2107
2108 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002109 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002110
Steven Rostedt07d777f2011-09-22 14:01:55 -04002111 tbuffer = get_trace_buf();
2112 if (!tbuffer) {
2113 len = 0;
2114 goto out;
2115 }
2116
2117 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2118
2119 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002120 goto out;
2121
Steven Rostedt07d777f2011-09-22 14:01:55 -04002122 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002123 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002124 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002125 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2126 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002127 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002128 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002129 entry = ring_buffer_event_data(event);
2130 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002131 entry->fmt = fmt;
2132
Steven Rostedt07d777f2011-09-22 14:01:55 -04002133 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002134 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002135 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05002136 ftrace_trace_stack(buffer, flags, 6, pc);
2137 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002138
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002139out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002140 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002141 unpause_graph_tracing();
2142
2143 return len;
2144}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002145EXPORT_SYMBOL_GPL(trace_vbprintk);
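/*
 * Usage sketch (hypothetical wrapper): trace_vbprintk() is normally
 * reached through the trace_printk() machinery, which hands it the
 * caller's address plus a va_list built from the format arguments.
 */
static int example_bprintk(unsigned long ip, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = trace_vbprintk(ip, fmt, ap);
	va_end(ap);
	return ret;
}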
2146
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002147static int
2148__trace_array_vprintk(struct ring_buffer *buffer,
2149 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002150{
Tom Zanussie1112b42009-03-31 00:48:49 -05002151 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002152 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002153 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002154 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002155 unsigned long flags;
2156 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002157
2158 if (tracing_disabled || tracing_selftest_running)
2159 return 0;
2160
Steven Rostedt07d777f2011-09-22 14:01:55 -04002161 /* Don't pollute graph traces with trace_vprintk internals */
2162 pause_graph_tracing();
2163
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002164 pc = preempt_count();
2165 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002166
Steven Rostedt07d777f2011-09-22 14:01:55 -04002167
2168 tbuffer = get_trace_buf();
2169 if (!tbuffer) {
2170 len = 0;
2171 goto out;
2172 }
2173
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002174 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002175
Steven Rostedt07d777f2011-09-22 14:01:55 -04002176 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002177 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002178 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002179 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002180 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002181 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002182 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002183 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002184
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002185 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05002186 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002187 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002188 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002189 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002190 out:
2191 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002192 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002193
2194 return len;
2195}
Steven Rostedt659372d2009-09-03 19:11:07 -04002196
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002197int trace_array_vprintk(struct trace_array *tr,
2198 unsigned long ip, const char *fmt, va_list args)
2199{
2200 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2201}
2202
2203int trace_array_printk(struct trace_array *tr,
2204 unsigned long ip, const char *fmt, ...)
2205{
2206 int ret;
2207 va_list ap;
2208
2209 if (!(trace_flags & TRACE_ITER_PRINTK))
2210 return 0;
2211
2212 va_start(ap, fmt);
2213 ret = trace_array_vprintk(tr, ip, fmt, ap);
2214 va_end(ap);
2215 return ret;
2216}
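/*
 * Usage sketch (hypothetical instance code): writing a formatted note
 * into a specific trace_array rather than the global buffer.
 */
static void example_note_reset(struct trace_array *tr, int cpu)
{
	trace_array_printk(tr, _THIS_IP_, "resetting cpu %d\n", cpu);
}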
2217
2218int trace_array_printk_buf(struct ring_buffer *buffer,
2219 unsigned long ip, const char *fmt, ...)
2220{
2221 int ret;
2222 va_list ap;
2223
2224 if (!(trace_flags & TRACE_ITER_PRINTK))
2225 return 0;
2226
2227 va_start(ap, fmt);
2228 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2229 va_end(ap);
2230 return ret;
2231}
2232
Steven Rostedt659372d2009-09-03 19:11:07 -04002233int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2234{
Steven Rostedta813a152009-10-09 01:41:35 -04002235 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002236}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002237EXPORT_SYMBOL_GPL(trace_vprintk);
2238
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002239static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002240{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002241 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2242
Steven Rostedt5a90f572008-09-03 17:42:51 -04002243 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002244 if (buf_iter)
2245 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002246}
2247
Ingo Molnare309b412008-05-12 21:20:51 +02002248static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002249peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2250 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002251{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002252 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002253 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002254
Steven Rostedtd7690412008-10-01 00:29:53 -04002255 if (buf_iter)
2256 event = ring_buffer_iter_peek(buf_iter, ts);
2257 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002258 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002259 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002260
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002261 if (event) {
2262 iter->ent_size = ring_buffer_event_length(event);
2263 return ring_buffer_event_data(event);
2264 }
2265 iter->ent_size = 0;
2266 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002267}
Steven Rostedtd7690412008-10-01 00:29:53 -04002268
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002269static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002270__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2271 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002272{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002273 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002274 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002275 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002276 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002277 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002278 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002279 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002280 int cpu;
2281
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002282 /*
2283	 * If we are in a per_cpu trace file, don't bother iterating over
2284	 * all cpus; just peek at that cpu directly.
2285 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002286 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002287 if (ring_buffer_empty_cpu(buffer, cpu_file))
2288 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002289 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002290 if (ent_cpu)
2291 *ent_cpu = cpu_file;
2292
2293 return ent;
2294 }
2295
Steven Rostedtab464282008-05-12 21:21:00 +02002296 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002297
2298 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002299 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002300
Steven Rostedtbc21b472010-03-31 19:49:26 -04002301 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002302
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02002303 /*
2304 * Pick the entry with the smallest timestamp:
2305 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002306 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002307 next = ent;
2308 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002309 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002310 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002311 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002312 }
2313 }
2314
Steven Rostedt12b5da32012-03-27 10:43:28 -04002315 iter->ent_size = next_size;
2316
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002317 if (ent_cpu)
2318 *ent_cpu = next_cpu;
2319
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002320 if (ent_ts)
2321 *ent_ts = next_ts;
2322
Steven Rostedtbc21b472010-03-31 19:49:26 -04002323 if (missing_events)
2324 *missing_events = next_lost;
2325
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002326 return next;
2327}
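/*
 * Worked example of the merge above: if the per-cpu head timestamps are
 * cpu0=105, cpu1=98 and cpu2 is empty, __find_next_entry() returns cpu1's
 * entry with next_ts=98. Repeated calls therefore yield one stream
 * ordered by timestamp across all the per-cpu buffers.
 */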
2328
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002329/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002330struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2331 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002332{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002333 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002334}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002335
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002336/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002337void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002338{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002339 iter->ent = __find_next_entry(iter, &iter->cpu,
2340 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002341
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002342 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002343 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002344
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002345 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002346}
2347
Ingo Molnare309b412008-05-12 21:20:51 +02002348static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002349{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002350 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002351 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002352}
2353
Ingo Molnare309b412008-05-12 21:20:51 +02002354static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002355{
2356 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002357 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002358 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002359
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002360 WARN_ON_ONCE(iter->leftover);
2361
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002362 (*pos)++;
2363
2364 /* can't go backwards */
2365 if (iter->idx > i)
2366 return NULL;
2367
2368 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002369 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002370 else
2371 ent = iter;
2372
2373 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002374 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002375
2376 iter->pos = *pos;
2377
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002378 return ent;
2379}
2380
Jason Wessel955b61e2010-08-05 09:22:23 -05002381void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002382{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002383 struct ring_buffer_event *event;
2384 struct ring_buffer_iter *buf_iter;
2385 unsigned long entries = 0;
2386 u64 ts;
2387
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002388 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002389
Steven Rostedt6d158a82012-06-27 20:46:14 -04002390 buf_iter = trace_buffer_iter(iter, cpu);
2391 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002392 return;
2393
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002394 ring_buffer_iter_reset(buf_iter);
2395
2396 /*
2397	 * With the max latency tracers, a reset may never have taken
2398	 * place on a cpu. This is evident when a timestamp is before
2399	 * the start of the buffer.
2400 */
2401 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002402 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002403 break;
2404 entries++;
2405 ring_buffer_read(buf_iter, NULL);
2406 }
2407
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002408 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002409}
2410
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002411/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002412 * The current tracer is copied to avoid taking a global lock
2413 * all around.
2414 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002415static void *s_start(struct seq_file *m, loff_t *pos)
2416{
2417 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002418 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002419 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002420 void *p = NULL;
2421 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002422 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002423
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002424 /*
2425	 * Copy the tracer to avoid using a global lock all around.
2426	 * iter->trace is a copy of current_trace, so the name pointer
2427	 * may be compared instead of using strcmp(), as iter->trace->name
2428	 * will point to the same string as current_trace->name.
2429 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002430 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002431 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2432 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002433 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002434
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002435#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002436 if (iter->snapshot && iter->trace->use_max_tr)
2437 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002438#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002439
2440 if (!iter->snapshot)
2441 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002442
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002443 if (*pos != iter->pos) {
2444 iter->ent = NULL;
2445 iter->cpu = 0;
2446 iter->idx = -1;
2447
Steven Rostedtae3b5092013-01-23 15:22:59 -05002448 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002449 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002450 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002451 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002452 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002453
Lai Jiangshanac91d852010-03-02 17:54:50 +08002454 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002455 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2456 ;
2457
2458 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002459 /*
2460		 * If we overflowed the seq_file before, then we want
2461		 * to reuse the trace_seq buffer.
2462 */
2463 if (iter->leftover)
2464 p = iter;
2465 else {
2466 l = *pos - 1;
2467 p = s_next(m, p, &l);
2468 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002469 }
2470
Lai Jiangshan4f535962009-05-18 19:35:34 +08002471 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002472 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002473 return p;
2474}
2475
2476static void s_stop(struct seq_file *m, void *p)
2477{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002478 struct trace_iterator *iter = m->private;
2479
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002480#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002481 if (iter->snapshot && iter->trace->use_max_tr)
2482 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002483#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002484
2485 if (!iter->snapshot)
2486 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002487
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002488 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002489 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002490}
2491
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002492static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002493get_total_entries(struct trace_buffer *buf,
2494 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002495{
2496 unsigned long count;
2497 int cpu;
2498
2499 *total = 0;
2500 *entries = 0;
2501
2502 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002503 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002504 /*
2505 * If this buffer has skipped entries, then we hold all
2506 * entries for the trace and we need to ignore the
2507 * ones before the time stamp.
2508 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002509 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2510 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002511 /* total is the same as the entries */
2512 *total += count;
2513 } else
2514 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002515 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002516 *entries += count;
2517 }
2518}
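
/*
 * Illustrative note (not from the original source): for a per-cpu
 * buffer that recorded 120 events of which 20 were overwritten,
 * get_total_entries() yields entries = 100 (still readable) and
 * total = 120 (everything ever written), which is exactly what the
 * "entries-in-buffer/entries-written: 100/120" header line reports.
 */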

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n"
		    "#                 / _-----=> irqs-off        \n"
		    "#                | / _----=> need-resched    \n"
		    "#                || / _---=> hardirq/softirq \n"
		    "#                ||| / _--=> preempt-depth   \n"
		    "#                |||| /     delay            \n"
		    "#  cmd     pid   ||||| time  |   caller      \n"
		    "#     \\   /      |||||  \\    |   /          \n");
}

static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}

static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
		    "#              | |       |          |         |\n");
}

static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n"
		    "#                             / _----=> need-resched\n"
		    "#                            | / _---=> hardirq/softirq\n"
		    "#                            || / _--=> preempt-depth\n"
		    "#                            ||| /     delay\n"
		    "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
		    "#              | |       |   ||||       |         |\n");
}
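
/*
 * Illustrative sample of a line under the irq-info header above (the
 * shape of typical ftrace output, not produced by this file):
 *
 *            bash-1977  [000] d...  17284.993652: sys_close <-system_call_fastpath
 */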

void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}
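
/*
 * Illustrative rendering of the header built above, with invented
 * values (this file only emits the format, not these numbers):
 *
 * # irqsoff latency trace v1.1.5 on 3.8.0
 * # --------------------------------------------------------------------
 * # latency: 259 us, #4/4, CPU#2 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
 * #    -----------------
 * #    | task: ps-6143 (uid:0 nice:0 policy:0 rt_prio:0)
 * #    -----------------
 * #  => started at: __lock_task_sighand
 * #  => ended at:   _raw_spin_unlock_irqrestore
 */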

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD(s, entry->pid);
		SEQ_PUT_FIELD(s, iter->cpu);
		SEQ_PUT_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}
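
/*
 * Illustrative usage (shell, not part of this file): the raw, hex and
 * bin formatters above are selected through the trace_options file,
 * e.g. with tracefs mounted at /sys/kernel/debug/tracing:
 *
 *   echo hex > /sys/kernel/debug/tracing/trace_options
 *   cat /sys/kernel/debug/tracing/trace
 *   echo nohex > /sys/kernel/debug/tracing/trace_options
 */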

int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->lost_events) {
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
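
/*
 * Illustrative note (not from the original source): when a consumer
 * falls behind and events are dropped, the branch above emits a line
 * such as:
 *
 *   CPU:1 [LOST 42 EVENTS]
 *
 * before dispatch falls through the tracer's own print_line(), the
 * printk-msg-only shortcuts, then bin/hex/raw, and finally the
 * default formatter.
 */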

void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer, m);
			else
				print_func_help_header(iter->trace_buffer, m);
		}
	}
}

static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
		    "#          MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "#                      (Doesn't have to be '2'; works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "#                      (Doesn't have to be '2'; works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif
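
/*
 * Illustrative snapshot usage (shell, not part of this file), with
 * tracefs mounted at /sys/kernel/debug/tracing:
 *
 *   cd /sys/kernel/debug/tracing
 *   echo 1 > snapshot    # allocate and take a snapshot of the main buffer
 *   cat snapshot         # read the frozen copy while tracing continues
 *   echo 0 > snapshot    # free the snapshot buffer again
 */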

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}
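
/*
 * Illustrative note (not from the original source): per-cpu files store
 * "cpu + 1" in inode->i_cdev so that a NULL i_cdev can still mean "all
 * CPUs"; a file created for CPU 0 stores (void *)1, and the subtraction
 * above recovers cpu == 0.
 */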

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}
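
/*
 * Illustrative note (not from the original source): opening "trace"
 * pauses recording (tracing_stop_tr() above) so iteration sees a stable
 * buffer, while opening "snapshot" reads the frozen max_buffer and
 * leaves live tracing running. From the shell this is the difference
 * between:
 *
 *   cat /sys/kernel/debug/tracing/trace
 *   cat /sys/kernel/debug/tracing/snapshot
 */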

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return (tracing_disabled) ? true : false;
}

/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}

static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}
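
/*
 * Illustrative note (not from the original source): each *_open_tr()
 * above takes a reference with trace_array_get() and the matching
 * release drops it with trace_array_put(), which is what allows an
 * instance directory to be removed only once all of its files are
 * closed.
 */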

static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
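
/*
 * Illustrative usage (shell, not part of this file): the O_TRUNC check
 * above is what makes the usual idiom for clearing the trace buffer
 * work:
 *
 *   echo > /sys/kernel/debug/tracing/trace
 */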

/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}
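
/*
 * Illustrative usage (shell, not part of this file): instance buffers
 * are created under tracefs, and only tracers that set allow_instances
 * pass the check above and appear in an instance's tracer list:
 *
 *   mkdir /sys/kernel/debug/tracing/instances/foo
 *   cat /sys/kernel/debug/tracing/instances/foo/available_tracers
 */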

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_puts(m, t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}
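
/*
 * Illustrative output (not from the original source): the seq_ops above
 * back the available_tracers file, whose entire content is a single
 * space-separated line, for example:
 *
 *   $ cat /sys/kernel/debug/tracing/available_tracers
 *   blk function_graph wakeup function nop
 */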

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};

/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
	if (count - len < 2) {
		count = -EINVAL;
		goto out_err;
	}
	len += sprintf(mask_str + len, "\n");
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};
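
/*
 * Illustrative usage (shell, not part of this file): tracing_cpumask
 * takes a hex CPU mask, so restricting tracing to CPUs 0 and 1 looks
 * like:
 *
 *   echo 3 > /sys/kernel/debug/tracing/tracing_cpumask
 *   cat /sys/kernel/debug/tracing/tracing_cpumask
 */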

static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tr->current_trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}
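
/*
 * Illustrative note (not from the original source): tracer-specific
 * options show up in trace_options next to the global flags. With the
 * function tracer selected, for instance, its func_stack_trace option
 * is toggled the same way as any global option:
 *
 *   echo function > current_tracer
 *   echo func_stack_trace > trace_options
 */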

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK)
		trace_printk_start_stop_comm(enabled);

	return 0;
}

static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	return ret;
}
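
/*
 * Illustrative usage (shell, not part of this file): the "no" prefix
 * stripped above inverts any option, so both of these are handled by
 * trace_set_options():
 *
 *   echo print-parent > /sys/kernel/debug/tracing/trace_options
 *   echo noprint-parent > /sys/kernel/debug/tracing/trace_options
 */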

static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};
3617
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003618static const char readme_msg[] =
3619 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003620 "# echo 0 > tracing_on : quick way to disable tracing\n"
3621 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3622 " Important files:\n"
3623 " trace\t\t\t- The static contents of the buffer\n"
3624 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3625 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3626 " current_tracer\t- function and latency tracers\n"
3627 " available_tracers\t- list of configured tracers for current_tracer\n"
3628 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3629 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3630 " trace_clock\t\t-change the clock used to order events\n"
3631 " local: Per cpu clock but may not be synced across CPUs\n"
3632 " global: Synced across CPUs but slows tracing down.\n"
3633 " counter: Not a clock, but just an increment\n"
3634 " uptime: Jiffy counter from time of boot\n"
3635 " perf: Same clock that perf events use\n"
3636#ifdef CONFIG_X86_64
3637 " x86-tsc: TSC cycle counter\n"
3638#endif
3639 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3640 " tracing_cpumask\t- Limit which CPUs to trace\n"
3641 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3642 "\t\t\t Remove sub-buffer with rmdir\n"
3643 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003644 "\t\t\t Disable an option by prefixing the option name\n"
3645 "\t\t\t with 'no'\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003646 " saved_cmdlines_size\t- echo the number of comm-pid entries to keep in here\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003647#ifdef CONFIG_DYNAMIC_FTRACE
3648 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003649 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3650 "\t\t\t functions\n"
3651 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3652 "\t modules: Can select a group via module\n"
3653 "\t Format: :mod:<module-name>\n"
3654 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3655 "\t triggers: a command to perform when function is hit\n"
3656 "\t Format: <function>:<trigger>[:count]\n"
3657 "\t trigger: traceon, traceoff\n"
3658 "\t\t enable_event:<system>:<event>\n"
3659 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003660#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003661 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003662#endif
3663#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003664 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003665#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04003666 "\t\t dump\n"
3667 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003668 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3669 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3670 "\t The first one will disable tracing every time do_fault is hit\n"
3671 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3672 "\t The first time do trap is hit and it disables tracing, the\n"
3673 "\t counter will decrement to 2. If tracing is already disabled,\n"
3674 "\t the counter will not decrement. It only decrements when the\n"
3675 "\t trigger did work\n"
3676 "\t To remove trigger without count:\n"
3677 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3678 "\t To remove trigger with a count:\n"
3679 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003680 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003681 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3682 "\t modules: Can select a group via module command :mod:\n"
3683 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003684#endif /* CONFIG_DYNAMIC_FTRACE */
3685#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003686 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3687 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003688#endif
3689#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3690 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09003691 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003692 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3693#endif
3694#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003695 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3696 "\t\t\t snapshot buffer. Read the contents for more\n"
3697 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003698#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003699#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003700 " stack_trace\t\t- Shows the max stack trace when active\n"
3701 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003702 "\t\t\t Write into this file to reset the max size (trigger a\n"
3703 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003704#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003705 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3706 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003707#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003708#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003709 " events/\t\t- Directory containing all trace event subsystems:\n"
3710 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3711 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003712 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3713 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003714 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003715 " events/<system>/<event>/\t- Directory containing control files for\n"
3716 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003717 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3718 " filter\t\t- If set, only events passing filter are traced\n"
3719 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003720 "\t Format: <trigger>[:count][if <filter>]\n"
3721 "\t trigger: traceon, traceoff\n"
3722 "\t enable_event:<system>:<event>\n"
3723 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003724#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003725 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003726#endif
3727#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003728 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003729#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003730 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3731 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3732 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3733 "\t events/block/block_unplug/trigger\n"
3734 "\t The first disables tracing every time block_unplug is hit.\n"
3735 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3736 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3737 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3738 "\t Like function triggers, the counter is only decremented if it\n"
3739 "\t enabled or disabled tracing.\n"
3740 "\t To remove a trigger without a count:\n"
3741 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3742 "\t To remove a trigger with a count:\n"
3743 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3744 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003745;
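/*
 * Worked example distilled from the mini-HOWTO above (paths assume the
 * usual mount at /sys/kernel/debug/tracing; adjust for your system):
 *
 *   cd /sys/kernel/debug/tracing
 *   echo 1 > tracing_on
 *   echo :mod:ext3 > set_ftrace_filter         # limit to ext3 functions
 *   echo do_trap:traceoff:3 > set_ftrace_filter
 *   cat trace_pipe                             # consuming read
 *   echo '!do_trap:traceoff:0' > set_ftrace_filter  # drop the trigger
 */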
3746
3747static ssize_t
3748tracing_readme_read(struct file *filp, char __user *ubuf,
3749 size_t cnt, loff_t *ppos)
3750{
3751 return simple_read_from_buffer(ubuf, cnt, ppos,
3752 readme_msg, strlen(readme_msg));
3753}
3754
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003755static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003756 .open = tracing_open_generic,
3757 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003758 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003759};
3760
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003761static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003762{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003763 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003764
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003765 if (*pos || m->count)
3766 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003767
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003768 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003769
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003770 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3771 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003772 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003773 continue;
3774
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003775 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003776 }
3777
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003778 return NULL;
3779}
Avadh Patel69abe6a2009-04-10 16:04:48 -04003780
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003781static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3782{
3783 void *v;
3784 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003785
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003786 preempt_disable();
3787 arch_spin_lock(&trace_cmdline_lock);
3788
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003789 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003790 while (l <= *pos) {
3791 v = saved_cmdlines_next(m, v, &l);
3792 if (!v)
3793 return NULL;
3794 }
3795
3796 return v;
3797}
3798
3799static void saved_cmdlines_stop(struct seq_file *m, void *v)
3800{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003801 arch_spin_unlock(&trace_cmdline_lock);
3802 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003803}
3804
3805static int saved_cmdlines_show(struct seq_file *m, void *v)
3806{
3807 char buf[TASK_COMM_LEN];
3808 unsigned int *pid = v;
3809
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003810 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003811 seq_printf(m, "%d %s\n", *pid, buf);
3812 return 0;
3813}
3814
3815static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3816 .start = saved_cmdlines_start,
3817 .next = saved_cmdlines_next,
3818 .stop = saved_cmdlines_stop,
3819 .show = saved_cmdlines_show,
3820};
3821
3822static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3823{
3824 if (tracing_disabled)
3825 return -ENODEV;
3826
3827 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04003828}
3829
3830static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003831 .open = tracing_saved_cmdlines_open,
3832 .read = seq_read,
3833 .llseek = seq_lseek,
3834 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04003835};
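/*
 * For illustration, each record emitted by saved_cmdlines_show() above is
 * a "<pid> <comm>" pair, so a read of the file looks roughly like:
 *
 *   # cat saved_cmdlines
 *   1 systemd
 *   742 sshd
 *
 * (pids and names are hypothetical; the map only holds comms recorded
 * while tracing was active)
 */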
3836
3837static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003838tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3839 size_t cnt, loff_t *ppos)
3840{
3841 char buf[64];
3842 int r;
3843
3844 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003845 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003846 arch_spin_unlock(&trace_cmdline_lock);
3847
3848 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3849}
3850
3851static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3852{
3853 kfree(s->saved_cmdlines);
3854 kfree(s->map_cmdline_to_pid);
3855 kfree(s);
3856}
3857
3858static int tracing_resize_saved_cmdlines(unsigned int val)
3859{
3860 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3861
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003862 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003863 if (!s)
3864 return -ENOMEM;
3865
3866 if (allocate_cmdlines_buffer(val, s) < 0) {
3867 kfree(s);
3868 return -ENOMEM;
3869 }
3870
3871 arch_spin_lock(&trace_cmdline_lock);
3872 savedcmd_temp = savedcmd;
3873 savedcmd = s;
3874 arch_spin_unlock(&trace_cmdline_lock);
3875 free_saved_cmdlines_buffer(savedcmd_temp);
3876
3877 return 0;
3878}
3879
3880static ssize_t
3881tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3882 size_t cnt, loff_t *ppos)
3883{
3884 unsigned long val;
3885 int ret;
3886
3887 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3888 if (ret)
3889 return ret;
3890
3891 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
3892 if (!val || val > PID_MAX_DEFAULT)
3893 return -EINVAL;
3894
3895 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3896 if (ret < 0)
3897 return ret;
3898
3899 *ppos += cnt;
3900
3901 return cnt;
3902}
3903
3904static const struct file_operations tracing_saved_cmdlines_size_fops = {
3905 .open = tracing_open_generic,
3906 .read = tracing_saved_cmdlines_size_read,
3907 .write = tracing_saved_cmdlines_size_write,
3908};
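/*
 * Usage sketch (values illustrative): a read reports the current capacity
 * and a write of 1..PID_MAX_DEFAULT swaps in a freshly allocated buffer
 * via tracing_resize_saved_cmdlines():
 *
 *   # cat saved_cmdlines_size
 *   128
 *   # echo 1024 > saved_cmdlines_size
 */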
3909
3910static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003911tracing_set_trace_read(struct file *filp, char __user *ubuf,
3912 size_t cnt, loff_t *ppos)
3913{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003914 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08003915 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003916 int r;
3917
3918 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003919 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003920 mutex_unlock(&trace_types_lock);
3921
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003922 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003923}
3924
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003925int tracer_init(struct tracer *t, struct trace_array *tr)
3926{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003927 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003928 return t->init(tr);
3929}
3930
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003931static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003932{
3933 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05003934
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003935 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003936 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003937}
3938
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003939#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09003940/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003941static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3942 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09003943{
3944 int cpu, ret = 0;
3945
3946 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3947 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003948 ret = ring_buffer_resize(trace_buf->buffer,
3949 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003950 if (ret < 0)
3951 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003952 per_cpu_ptr(trace_buf->data, cpu)->entries =
3953 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003954 }
3955 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003956 ret = ring_buffer_resize(trace_buf->buffer,
3957 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003958 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003959 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3960 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003961 }
3962
3963 return ret;
3964}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003965#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09003966
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003967static int __tracing_resize_ring_buffer(struct trace_array *tr,
3968 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04003969{
3970 int ret;
3971
3972 /*
3973 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04003974 * we use the size that was given, and we can forget about
3975 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04003976 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05003977 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04003978
Steven Rostedtb382ede62012-10-10 21:44:34 -04003979 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003980 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04003981 return 0;
3982
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003983 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003984 if (ret < 0)
3985 return ret;
3986
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003987#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003988 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3989 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003990 goto out;
3991
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003992 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003993 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003994 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3995 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003996 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04003997 /*
3998 * AARGH! We are left with different
3999 * size max buffer!!!!
4000 * The max buffer is our "snapshot" buffer.
4001 * When a tracer needs a snapshot (one of the
4002 * latency tracers), it swaps the max buffer
4003 * with the saved snapshot. We succeeded in updating
4004 * the size of the main buffer, but failed to
4005 * update the size of the max buffer. But when we tried
4006 * to reset the main buffer to the original size, we
4007 * failed there too. This is very unlikely to
4008 * happen, but if it does, warn and kill all
4009 * tracing.
4010 */
Steven Rostedt73c51622009-03-11 13:42:01 -04004011 WARN_ON(1);
4012 tracing_disabled = 1;
4013 }
4014 return ret;
4015 }
4016
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004017 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004018 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004019 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004020 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004021
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004022 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004023#endif /* CONFIG_TRACER_MAX_TRACE */
4024
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004025 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004026 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004027 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004028 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04004029
4030 return ret;
4031}
4032
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004033static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4034 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004035{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07004036 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004037
4038 mutex_lock(&trace_types_lock);
4039
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004040 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4041 /* make sure, this cpu is enabled in the mask */
4042 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4043 ret = -EINVAL;
4044 goto out;
4045 }
4046 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004047
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004048 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004049 if (ret < 0)
4050 ret = -ENOMEM;
4051
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004052out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004053 mutex_unlock(&trace_types_lock);
4054
4055 return ret;
4056}
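/*
 * Illustrative use from user space: tracing_entries_write() further down
 * funnels writes to buffer_size_kb into this resize path, so
 *
 *   echo 4096 > buffer_size_kb                 # all CPUs
 *   echo 4096 > per_cpu/cpu1/buffer_size_kb    # a single CPU
 *
 * arrives here with size = 4096 << 10 and cpu_id set to either
 * RING_BUFFER_ALL_CPUS or the per-cpu index.
 */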
4057
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004058
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004059/**
4060 * tracing_update_buffers - used by tracing facility to expand ring buffers
4061 *
4062 * To save memory when tracing is never used on a system that has it
4063 * configured in, the ring buffers are set to a minimum size. Once
4064 * a user starts to use the tracing facility, they need to grow
4065 * to their default size.
4066 *
4067 * This function is to be called when a tracer is about to be used.
4068 */
4069int tracing_update_buffers(void)
4070{
4071 int ret = 0;
4072
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004073 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004074 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004075 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004076 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004077 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004078
4079 return ret;
4080}
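/*
 * A minimal sketch of the expected call pattern (hypothetical caller):
 *
 *   ret = tracing_update_buffers();
 *   if (ret < 0)
 *       return ret;      /* buffers could not be expanded */
 *
 * so the first real user of tracing pays the allocation cost instead of
 * every boot.
 */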
4081
Steven Rostedt577b7852009-02-26 23:43:05 -05004082struct trace_option_dentry;
4083
4084static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004085create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004086
4087static void
4088destroy_trace_option_files(struct trace_option_dentry *topts);
4089
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004090/*
4091 * Used to clear out the tracer before deletion of an instance.
4092 * Must have trace_types_lock held.
4093 */
4094static void tracing_set_nop(struct trace_array *tr)
4095{
4096 if (tr->current_trace == &nop_trace)
4097 return;
4098
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004099 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004100
4101 if (tr->current_trace->reset)
4102 tr->current_trace->reset(tr);
4103
4104 tr->current_trace = &nop_trace;
4105}
4106
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004107static int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004108{
Steven Rostedt577b7852009-02-26 23:43:05 -05004109 static struct trace_option_dentry *topts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004110 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004111#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004112 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004113#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004114 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004115
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004116 mutex_lock(&trace_types_lock);
4117
Steven Rostedt73c51622009-03-11 13:42:01 -04004118 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004119 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004120 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04004121 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01004122 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04004123 ret = 0;
4124 }
4125
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004126 for (t = trace_types; t; t = t->next) {
4127 if (strcmp(t->name, buf) == 0)
4128 break;
4129 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004130 if (!t) {
4131 ret = -EINVAL;
4132 goto out;
4133 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004134 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004135 goto out;
4136
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004137 /* Some tracers are only allowed for the top level buffer */
4138 if (!trace_ok_for_array(t, tr)) {
4139 ret = -EINVAL;
4140 goto out;
4141 }
4142
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004143 /* If trace pipe files are being read, we can't change the tracer */
4144 if (tr->current_trace->ref) {
4145 ret = -EBUSY;
4146 goto out;
4147 }
4148
Steven Rostedt9f029e82008-11-12 15:24:24 -05004149 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004150
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004151 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004152
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004153 if (tr->current_trace->reset)
4154 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05004155
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004156 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004157 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05004158
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004159#ifdef CONFIG_TRACER_MAX_TRACE
4160 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05004161
4162 if (had_max_tr && !t->use_max_tr) {
4163 /*
4164 * We need to make sure that the update_max_tr sees that
4165 * current_trace changed to nop_trace to keep it from
4166 * swapping the buffers after we resize it.
4167 * The update_max_tr is called from interrupts disabled
4168 * so a synchronize_sched() is sufficient.
4169 */
4170 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004171 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004172 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004173#endif
Steven Rostedt (Red Hat)f1b21c92014-01-14 12:33:33 -05004174 /* Currently, only the top instance has options */
4175 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4176 destroy_trace_option_files(topts);
4177 topts = create_trace_option_files(tr, t);
4178 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004179
4180#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004181 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004182 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004183 if (ret < 0)
4184 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004185 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004186#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05004187
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004188 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004189 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004190 if (ret)
4191 goto out;
4192 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004193
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004194 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004195 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05004196 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004197 out:
4198 mutex_unlock(&trace_types_lock);
4199
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004200 return ret;
4201}
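/*
 * For illustration, the user-visible entry point is current_tracer:
 *
 *   echo function_graph > current_tracer
 *
 * which lands in tracing_set_trace_write() below, strips trailing
 * whitespace, and calls tracing_set_tracer() with "function_graph".
 */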
4202
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004203static ssize_t
4204tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4205 size_t cnt, loff_t *ppos)
4206{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004207 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004208 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004209 int i;
4210 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004211 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004212
Steven Rostedt60063a62008-10-28 10:44:24 -04004213 ret = cnt;
4214
Li Zefanee6c2c12009-09-18 14:06:47 +08004215 if (cnt > MAX_TRACER_SIZE)
4216 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004217
4218 if (copy_from_user(&buf, ubuf, cnt))
4219 return -EFAULT;
4220
4221 buf[cnt] = 0;
4222
4223 /* strip ending whitespace. */
4224 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4225 buf[i] = 0;
4226
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004227 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004228 if (err)
4229 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004230
Jiri Olsacf8517c2009-10-23 19:36:16 -04004231 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004232
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004233 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004234}
4235
4236static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004237tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4238 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004239{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004240 char buf[64];
4241 int r;
4242
Steven Rostedtcffae432008-05-12 21:21:00 +02004243 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004244 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004245 if (r > sizeof(buf))
4246 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004247 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004248}
4249
4250static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004251tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4252 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004253{
Hannes Eder5e398412009-02-10 19:44:34 +01004254 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004255 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004256
Peter Huewe22fe9b52011-06-07 21:58:27 +02004257 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4258 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004259 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004260
4261 *ptr = val * 1000;
4262
4263 return cnt;
4264}
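/*
 * The value is written in microseconds but stored in nanoseconds
 * (val * 1000). Illustrative arithmetic:
 *
 *   echo 250 > tracing_thresh    # *ptr becomes 250000 ns
 *
 * tracing_nsecs_read() converts back with nsecs_to_usecs() on the way
 * out, printing -1 for the "unset" value.
 */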
4265
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004266static ssize_t
4267tracing_thresh_read(struct file *filp, char __user *ubuf,
4268 size_t cnt, loff_t *ppos)
4269{
4270 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4271}
4272
4273static ssize_t
4274tracing_thresh_write(struct file *filp, const char __user *ubuf,
4275 size_t cnt, loff_t *ppos)
4276{
4277 struct trace_array *tr = filp->private_data;
4278 int ret;
4279
4280 mutex_lock(&trace_types_lock);
4281 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4282 if (ret < 0)
4283 goto out;
4284
4285 if (tr->current_trace->update_thresh) {
4286 ret = tr->current_trace->update_thresh(tr);
4287 if (ret < 0)
4288 goto out;
4289 }
4290
4291 ret = cnt;
4292out:
4293 mutex_unlock(&trace_types_lock);
4294
4295 return ret;
4296}
4297
4298static ssize_t
4299tracing_max_lat_read(struct file *filp, char __user *ubuf,
4300 size_t cnt, loff_t *ppos)
4301{
4302 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4303}
4304
4305static ssize_t
4306tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4307 size_t cnt, loff_t *ppos)
4308{
4309 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4310}
4311
Steven Rostedtb3806b42008-05-12 21:20:46 +02004312static int tracing_open_pipe(struct inode *inode, struct file *filp)
4313{
Oleg Nesterov15544202013-07-23 17:25:57 +02004314 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004315 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004316 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004317
4318 if (tracing_disabled)
4319 return -ENODEV;
4320
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004321 if (trace_array_get(tr) < 0)
4322 return -ENODEV;
4323
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004324 mutex_lock(&trace_types_lock);
4325
Steven Rostedtb3806b42008-05-12 21:20:46 +02004326 /* create a buffer to store the information to pass to userspace */
4327 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004328 if (!iter) {
4329 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004330 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004331 goto out;
4332 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004333
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04004334 trace_seq_init(&iter->seq);
4335
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004336 /*
4337 * We make a copy of the current tracer to avoid concurrent
4338 * changes on it while we are reading.
4339 */
4340 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4341 if (!iter->trace) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004342 ret = -ENOMEM;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004343 goto fail;
4344 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004345 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004346
4347 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4348 ret = -ENOMEM;
4349 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304350 }
4351
Steven Rostedta3097202008-11-07 22:36:02 -05004352 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304353 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004354
Steven Rostedt112f38a72009-06-01 15:16:05 -04004355 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4356 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4357
David Sharp8be07092012-11-13 12:18:22 -08004358 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004359 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004360 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4361
Oleg Nesterov15544202013-07-23 17:25:57 +02004362 iter->tr = tr;
4363 iter->trace_buffer = &tr->trace_buffer;
4364 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004365 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004366 filp->private_data = iter;
4367
Steven Rostedt107bad82008-05-12 21:21:01 +02004368 if (iter->trace->pipe_open)
4369 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004370
Arnd Bergmannb4447862010-07-07 23:40:11 +02004371 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004372
4373 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004374out:
4375 mutex_unlock(&trace_types_lock);
4376 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004377
4378fail:
4379 kfree(iter->trace);
4380 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004381 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004382 mutex_unlock(&trace_types_lock);
4383 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004384}
4385
4386static int tracing_release_pipe(struct inode *inode, struct file *file)
4387{
4388 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02004389 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004390
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004391 mutex_lock(&trace_types_lock);
4392
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004393 tr->current_trace->ref--;
4394
Steven Rostedt29bf4a52009-12-09 12:37:43 -05004395 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05004396 iter->trace->pipe_close(iter);
4397
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004398 mutex_unlock(&trace_types_lock);
4399
Rusty Russell44623442009-01-01 10:12:23 +10304400 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004401 mutex_destroy(&iter->mutex);
4402 kfree(iter->trace);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004403 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004404
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004405 trace_array_put(tr);
4406
Steven Rostedtb3806b42008-05-12 21:20:46 +02004407 return 0;
4408}
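/*
 * Sketch of the interlock implemented by the ref counting above: while a
 * trace_pipe reader holds the file open, current_trace->ref is non-zero
 * and tracing_set_tracer() refuses to switch tracers, e.g.
 *
 *   cat trace_pipe &
 *   echo nop > current_tracer    # -EBUSY until the reader exits
 */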
4409
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004410static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004411trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004412{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004413 /* Iterators are static, they should be filled or empty */
4414 if (trace_buffer_iter(iter, iter->cpu_file))
4415 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004416
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004417 if (trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004418 /*
4419 * Always select as readable when in blocking mode
4420 */
4421 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004422 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004423 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004424 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004425}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004426
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004427static unsigned int
4428tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4429{
4430 struct trace_iterator *iter = filp->private_data;
4431
4432 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004433}
4434
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004435/* Must be called with trace_types_lock mutex held. */
4436static int tracing_wait_pipe(struct file *filp)
4437{
4438 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004439 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004440
4441 while (trace_empty(iter)) {
4442
4443 if (filp->f_flags & O_NONBLOCK) {
4444 return -EAGAIN;
4445 }
4446
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004447 /*
Liu Bo250bfd32013-01-14 10:54:11 +08004448 * We block until we read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004449 * We still block if tracing is disabled, but we have never
4450 * read anything. This allows a user to cat this file, and
4451 * then enable tracing. But after we have read something,
4452 * we give an EOF when tracing is again disabled.
4453 *
4454 * iter->pos will be 0 if we haven't read anything.
4455 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04004456 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004457 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004458
4459 mutex_unlock(&iter->mutex);
4460
Rabin Vincente30f53a2014-11-10 19:46:34 +01004461 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004462
4463 mutex_lock(&iter->mutex);
4464
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004465 if (ret)
4466 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004467 }
4468
4469 return 1;
4470}
4471
Steven Rostedtb3806b42008-05-12 21:20:46 +02004472/*
4473 * Consumer reader.
4474 */
4475static ssize_t
4476tracing_read_pipe(struct file *filp, char __user *ubuf,
4477 size_t cnt, loff_t *ppos)
4478{
4479 struct trace_iterator *iter = filp->private_data;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004480 struct trace_array *tr = iter->tr;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004481 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004482
4483 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004484 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4485 if (sret != -EBUSY)
4486 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004487
Steven Rostedtf9520752009-03-02 14:04:40 -05004488 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004489
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004490 /* copy the tracer to avoid using a global lock all around */
Steven Rostedt107bad82008-05-12 21:21:01 +02004491 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004492 if (unlikely(iter->trace->name != tr->current_trace->name))
4493 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004494 mutex_unlock(&trace_types_lock);
4495
4496 /*
4497 * Avoid more than one consumer on a single file descriptor
4498 * This is just a matter of traces coherency, the ring buffer itself
4499 * is protected.
4500 */
4501 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004502 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004503 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4504 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02004505 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02004506 }
4507
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004508waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004509 sret = tracing_wait_pipe(filp);
4510 if (sret <= 0)
4511 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004512
4513 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004514 if (trace_empty(iter)) {
4515 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02004516 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004517 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004518
4519 if (cnt >= PAGE_SIZE)
4520 cnt = PAGE_SIZE - 1;
4521
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004522 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004523 memset(&iter->seq, 0,
4524 sizeof(struct trace_iterator) -
4525 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04004526 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02004527 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004528
Lai Jiangshan4f535962009-05-18 19:35:34 +08004529 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004530 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05004531 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004532 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004533 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004534
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004535 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004536 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02004537 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004538 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004539 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004540 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01004541 if (ret != TRACE_TYPE_NO_CONSUME)
4542 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004543
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004544 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02004545 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01004546
4547 /*
4548 * Setting the full flag means we reached the trace_seq buffer
4549 * size and we should leave by partial output condition above.
4550 * One of the trace_seq_* functions is not used properly.
4551 */
4552 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4553 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004554 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004555 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004556 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02004557
Steven Rostedtb3806b42008-05-12 21:20:46 +02004558 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004559 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004560 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05004561 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004562
4563 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004564 * If there was nothing to send to the user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004565 * entries, go back to wait for more entries.
4566 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004567 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004568 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004569
Steven Rostedt107bad82008-05-12 21:21:01 +02004570out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004571 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004572
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004573 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004574}
4575
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004576static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4577 unsigned int idx)
4578{
4579 __free_page(spd->pages[idx]);
4580}
4581
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08004582static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004583 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004584 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05004585 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004586 .steal = generic_pipe_buf_steal,
4587 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004588};
4589
Steven Rostedt34cd4992009-02-09 12:06:29 -05004590static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004591tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004592{
4593 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004594 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004595 int ret;
4596
4597 /* Seq buffer is page-sized, exactly what we need. */
4598 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004599 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004600 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004601
4602 if (trace_seq_has_overflowed(&iter->seq)) {
4603 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004604 break;
4605 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004606
4607 /*
4608 * This should not be hit, because it should only
4609 * be set if the iter->seq overflowed. But check it
4610 * anyway to be safe.
4611 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05004612 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004613 iter->seq.seq.len = save_len;
4614 break;
4615 }
4616
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004617 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004618 if (rem < count) {
4619 rem = 0;
4620 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004621 break;
4622 }
4623
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08004624 if (ret != TRACE_TYPE_NO_CONSUME)
4625 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05004626 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05004627 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004628 rem = 0;
4629 iter->ent = NULL;
4630 break;
4631 }
4632 }
4633
4634 return rem;
4635}
4636
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004637static ssize_t tracing_splice_read_pipe(struct file *filp,
4638 loff_t *ppos,
4639 struct pipe_inode_info *pipe,
4640 size_t len,
4641 unsigned int flags)
4642{
Jens Axboe35f3d142010-05-20 10:43:18 +02004643 struct page *pages_def[PIPE_DEF_BUFFERS];
4644 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004645 struct trace_iterator *iter = filp->private_data;
4646 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02004647 .pages = pages_def,
4648 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004649 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02004650 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004651 .flags = flags,
4652 .ops = &tracing_pipe_buf_ops,
4653 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004654 };
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004655 struct trace_array *tr = iter->tr;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004656 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004657 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004658 unsigned int i;
4659
Jens Axboe35f3d142010-05-20 10:43:18 +02004660 if (splice_grow_spd(pipe, &spd))
4661 return -ENOMEM;
4662
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004663 /* copy the tracer to avoid using a global lock all around */
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004664 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004665 if (unlikely(iter->trace->name != tr->current_trace->name))
4666 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004667 mutex_unlock(&trace_types_lock);
4668
4669 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004670
4671 if (iter->trace->splice_read) {
4672 ret = iter->trace->splice_read(iter, filp,
4673 ppos, pipe, len, flags);
4674 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004675 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004676 }
4677
4678 ret = tracing_wait_pipe(filp);
4679 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004680 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004681
Jason Wessel955b61e2010-08-05 09:22:23 -05004682 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004683 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004684 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004685 }
4686
Lai Jiangshan4f535962009-05-18 19:35:34 +08004687 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004688 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004689
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004690 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04004691 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004692 spd.pages[i] = alloc_page(GFP_KERNEL);
4693 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05004694 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004695
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004696 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004697
4698 /* Copy the data into the page, so we can start over. */
4699 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02004700 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004701 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004702 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004703 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004704 break;
4705 }
Jens Axboe35f3d142010-05-20 10:43:18 +02004706 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004707 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004708
Steven Rostedtf9520752009-03-02 14:04:40 -05004709 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004710 }
4711
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004712 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004713 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004714 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004715
4716 spd.nr_pages = i;
4717
Jens Axboe35f3d142010-05-20 10:43:18 +02004718 ret = splice_to_pipe(pipe, &spd);
4719out:
Eric Dumazet047fe362012-06-12 15:24:40 +02004720 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02004721 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004722
Steven Rostedt34cd4992009-02-09 12:06:29 -05004723out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004724 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02004725 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004726}
4727
Steven Rostedta98a3c32008-05-12 21:20:59 +02004728static ssize_t
4729tracing_entries_read(struct file *filp, char __user *ubuf,
4730 size_t cnt, loff_t *ppos)
4731{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004732 struct inode *inode = file_inode(filp);
4733 struct trace_array *tr = inode->i_private;
4734 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004735 char buf[64];
4736 int r = 0;
4737 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004738
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004739 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004740
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004741 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004742 int cpu, buf_size_same;
4743 unsigned long size;
4744
4745 size = 0;
4746 buf_size_same = 1;
4747 /* check if all cpu sizes are same */
4748 for_each_tracing_cpu(cpu) {
4749 /* fill in the size from first enabled cpu */
4750 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004751 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4752 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004753 buf_size_same = 0;
4754 break;
4755 }
4756 }
4757
4758 if (buf_size_same) {
4759 if (!ring_buffer_expanded)
4760 r = sprintf(buf, "%lu (expanded: %lu)\n",
4761 size >> 10,
4762 trace_buf_size >> 10);
4763 else
4764 r = sprintf(buf, "%lu\n", size >> 10);
4765 } else
4766 r = sprintf(buf, "X\n");
4767 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004768 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004769
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004770 mutex_unlock(&trace_types_lock);
4771
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004772 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4773 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004774}
4775
4776static ssize_t
4777tracing_entries_write(struct file *filp, const char __user *ubuf,
4778 size_t cnt, loff_t *ppos)
4779{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004780 struct inode *inode = file_inode(filp);
4781 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004782 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004783 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004784
Peter Huewe22fe9b52011-06-07 21:58:27 +02004785 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4786 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004787 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004788
4789 /* must have at least 1 entry */
4790 if (!val)
4791 return -EINVAL;
4792
Steven Rostedt1696b2b2008-11-13 00:09:35 -05004793 /* value is in KB */
4794 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004795 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004796 if (ret < 0)
4797 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004798
Jiri Olsacf8517c2009-10-23 19:36:16 -04004799 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004800
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004801 return cnt;
4802}
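
/*
 * Usage sketch for the buffer_size_kb interface implemented by the two
 * handlers above (illustrative; assumes debugfs at /sys/kernel/debug):
 *
 *	char buf[64];
 *	int fd = open("/sys/kernel/debug/tracing/buffer_size_kb", O_RDWR);
 *
 *	// Request 1408 KB per cpu; the string is parsed with
 *	// kstrtoul_from_user() and shifted from KB into bytes.
 *	write(fd, "1408", 4);
 *	// Read the current size back. Before the first expansion this may
 *	// print e.g. "7 (expanded: 1408)"; if the per-cpu sizes differ it
 *	// prints "X".
 *	pread(fd, buf, sizeof(buf) - 1, 0);
 */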
Steven Rostedtbf5e6512008-11-10 21:46:00 -05004803
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004804static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004805tracing_total_entries_read(struct file *filp, char __user *ubuf,
4806 size_t cnt, loff_t *ppos)
4807{
4808 struct trace_array *tr = filp->private_data;
4809 char buf[64];
4810 int r, cpu;
4811 unsigned long size = 0, expanded_size = 0;
4812
4813 mutex_lock(&trace_types_lock);
4814 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004815 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004816 if (!ring_buffer_expanded)
4817 expanded_size += trace_buf_size >> 10;
4818 }
4819 if (ring_buffer_expanded)
4820 r = sprintf(buf, "%lu\n", size);
4821 else
4822 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4823 mutex_unlock(&trace_types_lock);
4824
4825 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4826}
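
/*
 * buffer_total_size_kb is the read-only sum of the per-cpu sizes, so a
 * shell check might look like (output illustrative):
 *
 *	# cat /sys/kernel/debug/tracing/buffer_total_size_kb
 *	5632
 */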
4827
4828static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004829tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4830 size_t cnt, loff_t *ppos)
4831{
4832 /*
4833	 * There is no need to read what the user has written; this function
4834	 * exists only so that "echo" into the file does not return an error.
4835 */
4836
4837 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004838
4839 return cnt;
4840}
4841
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004842static int
4843tracing_free_buffer_release(struct inode *inode, struct file *filp)
4844{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004845 struct trace_array *tr = inode->i_private;
4846
Steven Rostedtcf30cf62011-06-14 22:44:07 -04004847	/* disable tracing? */
4848 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07004849 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004850 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004851 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004852
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004853 trace_array_put(tr);
4854
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004855 return 0;
4856}
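
/*
 * Together the two handlers above implement the free_buffer file: any
 * write is accepted and discarded, and the buffers are freed when the
 * file is released. From a shell (path assumes debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	echo > /sys/kernel/debug/tracing/free_buffer
 *
 * shrinks every per-cpu ring buffer to zero on close, first turning
 * tracing off if the TRACE_ITER_STOP_ON_FREE option is set.
 */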
4857
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004858static ssize_t
4859tracing_mark_write(struct file *filp, const char __user *ubuf,
4860 size_t cnt, loff_t *fpos)
4861{
Steven Rostedtd696b582011-09-22 11:50:27 -04004862 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07004863 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04004864 struct ring_buffer_event *event;
4865 struct ring_buffer *buffer;
4866 struct print_entry *entry;
4867 unsigned long irq_flags;
4868 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004869 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04004870 int nr_pages = 1;
4871 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04004872 int offset;
4873 int size;
4874 int len;
4875 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004876 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004877
Steven Rostedtc76f0692008-11-07 22:36:02 -05004878 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004879 return -EINVAL;
4880
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07004881 if (!(trace_flags & TRACE_ITER_MARKERS))
4882 return -EINVAL;
4883
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004884 if (cnt > TRACE_BUF_SIZE)
4885 cnt = TRACE_BUF_SIZE;
4886
Steven Rostedtd696b582011-09-22 11:50:27 -04004887 /*
4888 * Userspace is injecting traces into the kernel trace buffer.
4889	 * We want to be as non-intrusive as possible.
4890 * To do so, we do not want to allocate any special buffers
4891 * or take any locks, but instead write the userspace data
4892 * straight into the ring buffer.
4893 *
4894 * First we need to pin the userspace buffer into memory,
4895	 * which it most likely already is, because userspace just referenced it.
4896 * But there's no guarantee that it is. By using get_user_pages_fast()
4897 * and kmap_atomic/kunmap_atomic() we can get access to the
4898 * pages directly. We then write the data directly into the
4899 * ring buffer.
4900 */
4901 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004902
Steven Rostedtd696b582011-09-22 11:50:27 -04004903 /* check if we cross pages */
4904 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4905 nr_pages = 2;
4906
4907 offset = addr & (PAGE_SIZE - 1);
4908 addr &= PAGE_MASK;
4909
4910 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4911 if (ret < nr_pages) {
4912 while (--ret >= 0)
4913 put_page(pages[ret]);
4914 written = -EFAULT;
4915 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004916 }
4917
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004918 for (i = 0; i < nr_pages; i++)
4919 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04004920
4921 local_save_flags(irq_flags);
4922 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07004923 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04004924 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4925 irq_flags, preempt_count());
4926 if (!event) {
4927 /* Ring buffer disabled, return as if not open for write */
4928 written = -EBADF;
4929 goto out_unlock;
4930 }
4931
4932 entry = ring_buffer_event_data(event);
4933 entry->ip = _THIS_IP_;
4934
4935 if (nr_pages == 2) {
4936 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004937 memcpy(&entry->buf, map_page[0] + offset, len);
4938 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04004939 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004940 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04004941
4942 if (entry->buf[cnt - 1] != '\n') {
4943 entry->buf[cnt] = '\n';
4944 entry->buf[cnt + 1] = '\0';
4945 } else
4946 entry->buf[cnt] = '\0';
4947
Steven Rostedt7ffbd482012-10-11 12:14:25 -04004948 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04004949
4950 written = cnt;
4951
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004952 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004953
Steven Rostedtd696b582011-09-22 11:50:27 -04004954 out_unlock:
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004955	for (i = 0; i < nr_pages; i++) {
4956 kunmap_atomic(map_page[i]);
4957 put_page(pages[i]);
4958 }
Steven Rostedtd696b582011-09-22 11:50:27 -04004959 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004960 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004961}
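
/*
 * A minimal userspace sketch for injecting a marker through the handler
 * above (illustrative; assumes debugfs at /sys/kernel/debug):
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
 *
 *	// The write lands in the ring buffer as a TRACE_PRINT event; the
 *	// kernel appends a newline if the message lacks one.
 *	write(fd, "hit checkpoint A", 16);
 *	close(fd);
 */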
4962
Li Zefan13f16d22009-12-08 11:16:11 +08004963static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08004964{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004965 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004966 int i;
4967
4968 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08004969 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08004970 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004971 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4972 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08004973 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08004974
Li Zefan13f16d22009-12-08 11:16:11 +08004975 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08004976}
4977
Steven Rostedte1e232c2014-02-10 23:38:46 -05004978static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08004979{
Zhaolei5079f322009-08-25 16:12:56 +08004980 int i;
4981
Zhaolei5079f322009-08-25 16:12:56 +08004982 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4983 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4984 break;
4985 }
4986 if (i == ARRAY_SIZE(trace_clocks))
4987 return -EINVAL;
4988
Zhaolei5079f322009-08-25 16:12:56 +08004989 mutex_lock(&trace_types_lock);
4990
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004991 tr->clock_id = i;
4992
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004993 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08004994
David Sharp60303ed2012-10-11 16:27:52 -07004995 /*
4996 * New clock may not be consistent with the previous clock.
4997 * Reset the buffer so that it doesn't have incomparable timestamps.
4998 */
Alexander Z Lam94571582013-08-02 18:36:16 -07004999 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005000
5001#ifdef CONFIG_TRACER_MAX_TRACE
5002 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5003 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07005004 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005005#endif
David Sharp60303ed2012-10-11 16:27:52 -07005006
Zhaolei5079f322009-08-25 16:12:56 +08005007 mutex_unlock(&trace_types_lock);
5008
Steven Rostedte1e232c2014-02-10 23:38:46 -05005009 return 0;
5010}
5011
5012static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5013 size_t cnt, loff_t *fpos)
5014{
5015 struct seq_file *m = filp->private_data;
5016 struct trace_array *tr = m->private;
5017 char buf[64];
5018 const char *clockstr;
5019 int ret;
5020
5021 if (cnt >= sizeof(buf))
5022 return -EINVAL;
5023
5024 if (copy_from_user(&buf, ubuf, cnt))
5025 return -EFAULT;
5026
5027 buf[cnt] = 0;
5028
5029 clockstr = strstrip(buf);
5030
5031 ret = tracing_set_clock(tr, clockstr);
5032 if (ret)
5033 return ret;
5034
Zhaolei5079f322009-08-25 16:12:56 +08005035 *fpos += cnt;
5036
5037 return cnt;
5038}
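
/*
 * Usage sketch for the trace_clock file served by the two handlers
 * above (illustrative): a read lists the known clocks with the active
 * one bracketed, e.g. "[local] global counter ...", and writing a name
 * switches clocks and resets the buffers, since old and new timestamps
 * would not be comparable:
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */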
5039
Li Zefan13f16d22009-12-08 11:16:11 +08005040static int tracing_clock_open(struct inode *inode, struct file *file)
5041{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005042 struct trace_array *tr = inode->i_private;
5043 int ret;
5044
Li Zefan13f16d22009-12-08 11:16:11 +08005045 if (tracing_disabled)
5046 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005047
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005048 if (trace_array_get(tr))
5049 return -ENODEV;
5050
5051 ret = single_open(file, tracing_clock_show, inode->i_private);
5052 if (ret < 0)
5053 trace_array_put(tr);
5054
5055 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08005056}
5057
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005058struct ftrace_buffer_info {
5059 struct trace_iterator iter;
5060 void *spare;
5061 unsigned int read;
5062};
5063
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005064#ifdef CONFIG_TRACER_SNAPSHOT
5065static int tracing_snapshot_open(struct inode *inode, struct file *file)
5066{
Oleg Nesterov6484c712013-07-23 17:26:10 +02005067 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005068 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005069 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005070 int ret = 0;
5071
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005072 if (trace_array_get(tr) < 0)
5073 return -ENODEV;
5074
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005075 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02005076 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005077 if (IS_ERR(iter))
5078 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005079 } else {
5080 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005081 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005082 m = kzalloc(sizeof(*m), GFP_KERNEL);
5083 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005084 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005085 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5086 if (!iter) {
5087 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005088 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005089 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005090 ret = 0;
5091
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005092 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02005093 iter->trace_buffer = &tr->max_buffer;
5094 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005095 m->private = iter;
5096 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005097 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005098out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005099 if (ret < 0)
5100 trace_array_put(tr);
5101
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005102 return ret;
5103}
5104
5105static ssize_t
5106tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5107 loff_t *ppos)
5108{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005109 struct seq_file *m = filp->private_data;
5110 struct trace_iterator *iter = m->private;
5111 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005112 unsigned long val;
5113 int ret;
5114
5115 ret = tracing_update_buffers();
5116 if (ret < 0)
5117 return ret;
5118
5119 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5120 if (ret)
5121 return ret;
5122
5123 mutex_lock(&trace_types_lock);
5124
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005125 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005126 ret = -EBUSY;
5127 goto out;
5128 }
5129
5130 switch (val) {
5131 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005132 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5133 ret = -EINVAL;
5134 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005135 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005136 if (tr->allocated_snapshot)
5137 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005138 break;
5139 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005140/* Only allow per-cpu swap if the ring buffer supports it */
5141#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5142 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5143 ret = -EINVAL;
5144 break;
5145 }
5146#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005147 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005148 ret = alloc_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005149 if (ret < 0)
5150 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005151 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005152 local_irq_disable();
5153 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005154 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005155 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005156 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005157 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005158 local_irq_enable();
5159 break;
5160 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005161 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005162 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5163 tracing_reset_online_cpus(&tr->max_buffer);
5164 else
5165 tracing_reset(&tr->max_buffer, iter->cpu_file);
5166 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005167 break;
5168 }
5169
5170 if (ret >= 0) {
5171 *ppos += cnt;
5172 ret = cnt;
5173 }
5174out:
5175 mutex_unlock(&trace_types_lock);
5176 return ret;
5177}
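
/*
 * The switch above gives the snapshot file its echo interface
 * (illustrative, from a shell with debugfs mounted):
 *
 *	echo 1 > /sys/kernel/debug/tracing/snapshot	# allocate + swap
 *	cat /sys/kernel/debug/tracing/snapshot		# read frozen copy
 *	echo 2 > /sys/kernel/debug/tracing/snapshot	# clear the snapshot
 *	echo 0 > /sys/kernel/debug/tracing/snapshot	# free the buffer
 *
 * Any value other than 0 or 1 only clears an existing snapshot. The
 * per-cpu snapshot files reject 0, and accept 1 only when the ring
 * buffer supports per-cpu swap.
 */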
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005178
5179static int tracing_snapshot_release(struct inode *inode, struct file *file)
5180{
5181 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005182 int ret;
5183
5184 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005185
5186 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005187 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005188
5189 /* If write only, the seq_file is just a stub */
5190 if (m)
5191 kfree(m->private);
5192 kfree(m);
5193
5194 return 0;
5195}
5196
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005197static int tracing_buffers_open(struct inode *inode, struct file *filp);
5198static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5199 size_t count, loff_t *ppos);
5200static int tracing_buffers_release(struct inode *inode, struct file *file);
5201static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5202 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5203
5204static int snapshot_raw_open(struct inode *inode, struct file *filp)
5205{
5206 struct ftrace_buffer_info *info;
5207 int ret;
5208
5209 ret = tracing_buffers_open(inode, filp);
5210 if (ret < 0)
5211 return ret;
5212
5213 info = filp->private_data;
5214
5215 if (info->iter.trace->use_max_tr) {
5216 tracing_buffers_release(inode, filp);
5217 return -EBUSY;
5218 }
5219
5220 info->iter.snapshot = true;
5221 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5222
5223 return ret;
5224}
5225
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005226#endif /* CONFIG_TRACER_SNAPSHOT */
5227
5228
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005229static const struct file_operations tracing_thresh_fops = {
5230 .open = tracing_open_generic,
5231 .read = tracing_thresh_read,
5232 .write = tracing_thresh_write,
5233 .llseek = generic_file_llseek,
5234};
5235
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005236static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005237 .open = tracing_open_generic,
5238 .read = tracing_max_lat_read,
5239 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005240 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005241};
5242
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005243static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005244 .open = tracing_open_generic,
5245 .read = tracing_set_trace_read,
5246 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005247 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005248};
5249
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005250static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005251 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005252 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005253 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005254 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005255 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005256 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02005257};
5258
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005259static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005260 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005261 .read = tracing_entries_read,
5262 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005263 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005264 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005265};
5266
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005267static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005268 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005269 .read = tracing_total_entries_read,
5270 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005271 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005272};
5273
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005274static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005275 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005276 .write = tracing_free_buffer_write,
5277 .release = tracing_free_buffer_release,
5278};
5279
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005280static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005281 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005282 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005283 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005284 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005285};
5286
Zhaolei5079f322009-08-25 16:12:56 +08005287static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08005288 .open = tracing_clock_open,
5289 .read = seq_read,
5290 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005291 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08005292 .write = tracing_clock_write,
5293};
5294
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005295#ifdef CONFIG_TRACER_SNAPSHOT
5296static const struct file_operations snapshot_fops = {
5297 .open = tracing_snapshot_open,
5298 .read = seq_read,
5299 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005300 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005301 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005302};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005303
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005304static const struct file_operations snapshot_raw_fops = {
5305 .open = snapshot_raw_open,
5306 .read = tracing_buffers_read,
5307 .release = tracing_buffers_release,
5308 .splice_read = tracing_buffers_splice_read,
5309 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005310};
5311
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005312#endif /* CONFIG_TRACER_SNAPSHOT */
5313
Steven Rostedt2cadf912008-12-01 22:20:19 -05005314static int tracing_buffers_open(struct inode *inode, struct file *filp)
5315{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005316 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005317 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005318 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005319
5320 if (tracing_disabled)
5321 return -ENODEV;
5322
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005323 if (trace_array_get(tr) < 0)
5324 return -ENODEV;
5325
Steven Rostedt2cadf912008-12-01 22:20:19 -05005326 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005327 if (!info) {
5328 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005329 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005330 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005331
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005332 mutex_lock(&trace_types_lock);
5333
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005334 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005335 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05005336 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005337 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005338 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005339 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005340 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005341
5342 filp->private_data = info;
5343
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005344 tr->current_trace->ref++;
5345
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005346 mutex_unlock(&trace_types_lock);
5347
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005348 ret = nonseekable_open(inode, filp);
5349 if (ret < 0)
5350 trace_array_put(tr);
5351
5352 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005353}
5354
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005355static unsigned int
5356tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5357{
5358 struct ftrace_buffer_info *info = filp->private_data;
5359 struct trace_iterator *iter = &info->iter;
5360
5361 return trace_poll(iter, filp, poll_table);
5362}
5363
Steven Rostedt2cadf912008-12-01 22:20:19 -05005364static ssize_t
5365tracing_buffers_read(struct file *filp, char __user *ubuf,
5366 size_t count, loff_t *ppos)
5367{
5368 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005369 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005370 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005371 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005372
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005373 if (!count)
5374 return 0;
5375
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005376 mutex_lock(&trace_types_lock);
5377
5378#ifdef CONFIG_TRACER_MAX_TRACE
5379 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5380 size = -EBUSY;
5381 goto out_unlock;
5382 }
5383#endif
5384
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005385 if (!info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005386 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5387 iter->cpu_file);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005388 size = -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005389 if (!info->spare)
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005390 goto out_unlock;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005391
Steven Rostedt2cadf912008-12-01 22:20:19 -05005392 /* Do we have previous read data to read? */
5393 if (info->read < PAGE_SIZE)
5394 goto read;
5395
Steven Rostedtb6273442013-02-28 13:44:11 -05005396 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005397 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005398 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005399 &info->spare,
5400 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005401 iter->cpu_file, 0);
5402 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05005403
5404 if (ret < 0) {
5405 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005406 if ((filp->f_flags & O_NONBLOCK)) {
5407 size = -EAGAIN;
5408 goto out_unlock;
5409 }
5410 mutex_unlock(&trace_types_lock);
Rabin Vincente30f53a2014-11-10 19:46:34 +01005411 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005412 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005413 if (ret) {
5414 size = ret;
5415 goto out_unlock;
5416 }
Steven Rostedtb6273442013-02-28 13:44:11 -05005417 goto again;
5418 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005419 size = 0;
5420 goto out_unlock;
Steven Rostedtb6273442013-02-28 13:44:11 -05005421 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005422
Steven Rostedt436fc282011-10-14 10:44:25 -04005423 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05005424 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05005425 size = PAGE_SIZE - info->read;
5426 if (size > count)
5427 size = count;
5428
5429 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005430 if (ret == size) {
5431 size = -EFAULT;
5432 goto out_unlock;
5433 }
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005434 size -= ret;
5435
Steven Rostedt2cadf912008-12-01 22:20:19 -05005436 *ppos += size;
5437 info->read += size;
5438
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005439 out_unlock:
5440 mutex_unlock(&trace_types_lock);
5441
Steven Rostedt2cadf912008-12-01 22:20:19 -05005442 return size;
5443}
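
/*
 * Reads from trace_pipe_raw return whole ring-buffer pages rather than
 * rendered text. A userspace sketch (path and page size are
 * illustrative, and consume_raw_page() is a hypothetical helper):
 *
 *	char page[4096];
 *	ssize_t n;
 *	int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		      O_RDONLY);
 *
 *	// Each read hands back binary page data for a tool that
 *	// understands the ring-buffer page format.
 *	while ((n = read(fd, page, sizeof(page))) > 0)
 *		consume_raw_page(page, n);
 */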
5444
5445static int tracing_buffers_release(struct inode *inode, struct file *file)
5446{
5447 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005448 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005449
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005450 mutex_lock(&trace_types_lock);
5451
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005452 iter->tr->current_trace->ref--;
5453
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005454 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005455
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005456 if (info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005457 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005458 kfree(info);
5459
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005460 mutex_unlock(&trace_types_lock);
5461
Steven Rostedt2cadf912008-12-01 22:20:19 -05005462 return 0;
5463}
5464
5465struct buffer_ref {
5466 struct ring_buffer *buffer;
5467 void *page;
5468 int ref;
5469};
5470
5471static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5472 struct pipe_buffer *buf)
5473{
5474 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5475
5476 if (--ref->ref)
5477 return;
5478
5479 ring_buffer_free_read_page(ref->buffer, ref->page);
5480 kfree(ref);
5481 buf->private = 0;
5482}
5483
Steven Rostedt2cadf912008-12-01 22:20:19 -05005484static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5485 struct pipe_buffer *buf)
5486{
5487 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5488
5489 ref->ref++;
5490}
5491
5492/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005493static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005494 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005495 .confirm = generic_pipe_buf_confirm,
5496 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09005497 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005498 .get = buffer_pipe_buf_get,
5499};
5500
5501/*
5502 * Callback from splice_to_pipe(); used to release the pages left
5503 * in the spd if we errored out while filling the pipe.
5504 */
5505static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5506{
5507 struct buffer_ref *ref =
5508 (struct buffer_ref *)spd->partial[i].private;
5509
5510 if (--ref->ref)
5511 return;
5512
5513 ring_buffer_free_read_page(ref->buffer, ref->page);
5514 kfree(ref);
5515 spd->partial[i].private = 0;
5516}
5517
5518static ssize_t
5519tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5520 struct pipe_inode_info *pipe, size_t len,
5521 unsigned int flags)
5522{
5523 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005524 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02005525 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5526 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05005527 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005528 .pages = pages_def,
5529 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02005530 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005531 .flags = flags,
5532 .ops = &buffer_pipe_buf_ops,
5533 .spd_release = buffer_spd_release,
5534 };
5535 struct buffer_ref *ref;
Steven Rostedt93459c62009-04-29 00:23:13 -04005536 int entries, size, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01005537 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005538
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005539 mutex_lock(&trace_types_lock);
5540
5541#ifdef CONFIG_TRACER_MAX_TRACE
5542 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5543 ret = -EBUSY;
5544 goto out;
5545 }
5546#endif
5547
5548 if (splice_grow_spd(pipe, &spd)) {
5549 ret = -ENOMEM;
5550 goto out;
5551 }
Jens Axboe35f3d142010-05-20 10:43:18 +02005552
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005553 if (*ppos & (PAGE_SIZE - 1)) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005554 ret = -EINVAL;
5555 goto out;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005556 }
5557
5558 if (len & (PAGE_SIZE - 1)) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005559 if (len < PAGE_SIZE) {
5560 ret = -EINVAL;
5561 goto out;
5562 }
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005563 len &= PAGE_MASK;
5564 }
5565
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005566 again:
5567 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005568 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04005569
Al Viroa786c062014-04-11 12:01:03 -04005570 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005571 struct page *page;
5572 int r;
5573
5574 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01005575 if (!ref) {
5576 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005577 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01005578 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005579
Steven Rostedt7267fa62009-04-29 00:16:21 -04005580 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005581 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005582 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005583 if (!ref->page) {
Rabin Vincent07906da2014-11-06 22:26:07 +01005584 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005585 kfree(ref);
5586 break;
5587 }
5588
5589 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005590 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005591 if (r < 0) {
Vaibhav Nagarnaik7ea59062011-05-03 17:56:42 -07005592 ring_buffer_free_read_page(ref->buffer, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005593 kfree(ref);
5594 break;
5595 }
5596
5597 /*
5598		 * zero out any leftover data; this is going to
5599		 * user land.
5600 */
5601 size = ring_buffer_page_len(ref->page);
5602 if (size < PAGE_SIZE)
5603 memset(ref->page + size, 0, PAGE_SIZE - size);
5604
5605 page = virt_to_page(ref->page);
5606
5607 spd.pages[i] = page;
5608 spd.partial[i].len = PAGE_SIZE;
5609 spd.partial[i].offset = 0;
5610 spd.partial[i].private = (unsigned long)ref;
5611 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005612 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04005613
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005614 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005615 }
5616
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005617 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005618 spd.nr_pages = i;
5619
5620 /* did we read anything? */
5621 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01005622 if (ret)
5623 goto out;
5624
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005625 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005626 ret = -EAGAIN;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005627 goto out;
5628 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005629 mutex_unlock(&trace_types_lock);
Rabin Vincente30f53a2014-11-10 19:46:34 +01005630 ret = wait_on_pipe(iter, true);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005631 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005632 if (ret)
5633 goto out;
Rabin Vincente30f53a2014-11-10 19:46:34 +01005634
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005635 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005636 }
5637
5638 ret = splice_to_pipe(pipe, &spd);
Eric Dumazet047fe362012-06-12 15:24:40 +02005639 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02005640out:
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005641 mutex_unlock(&trace_types_lock);
5642
Steven Rostedt2cadf912008-12-01 22:20:19 -05005643 return ret;
5644}
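
/*
 * Splicing trace_pipe_raw moves ring-buffer pages into a pipe without
 * copying through user memory. Note the checks above: both *ppos and
 * len must be page aligned, and a len below one page is rejected. A
 * sketch (illustrative; a real consumer would loop and handle EAGAIN):
 *
 *	int fds[2];
 *	int raw = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		       O_RDONLY);
 *	int out = open("cpu0.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *
 *	pipe(fds);
 *	splice(raw, NULL, fds[1], NULL, 4096, 0);	// page into pipe
 *	splice(fds[0], NULL, out, NULL, 4096, 0);	// pipe to file
 */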
5645
5646static const struct file_operations tracing_buffers_fops = {
5647 .open = tracing_buffers_open,
5648 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005649 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005650 .release = tracing_buffers_release,
5651 .splice_read = tracing_buffers_splice_read,
5652 .llseek = no_llseek,
5653};
5654
Steven Rostedtc8d77182009-04-29 18:03:45 -04005655static ssize_t
5656tracing_stats_read(struct file *filp, char __user *ubuf,
5657 size_t count, loff_t *ppos)
5658{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005659 struct inode *inode = file_inode(filp);
5660 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005661 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005662 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005663 struct trace_seq *s;
5664 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005665 unsigned long long t;
5666 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005667
Li Zefane4f2d102009-06-15 10:57:28 +08005668 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005669 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01005670 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005671
5672 trace_seq_init(s);
5673
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005674 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005675 trace_seq_printf(s, "entries: %ld\n", cnt);
5676
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005677 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005678 trace_seq_printf(s, "overrun: %ld\n", cnt);
5679
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005680 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005681 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5682
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005683 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005684 trace_seq_printf(s, "bytes: %ld\n", cnt);
5685
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005686 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08005687 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005688 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08005689 usec_rem = do_div(t, USEC_PER_SEC);
5690 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5691 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005692
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005693 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08005694 usec_rem = do_div(t, USEC_PER_SEC);
5695 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5696 } else {
5697 /* counter or tsc mode for trace_clock */
5698 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005699 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08005700
5701 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005702 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08005703 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005704
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005705 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07005706 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5707
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005708 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05005709 trace_seq_printf(s, "read events: %ld\n", cnt);
5710
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005711 count = simple_read_from_buffer(ubuf, count, ppos,
5712 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04005713
5714 kfree(s);
5715
5716 return count;
5717}
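
/*
 * A representative per_cpu/cpuN/stats read produced by the handler
 * above; the field order matches the trace_seq_printf() calls and the
 * numbers are illustrative:
 *
 *	entries: 1024
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 53408
 *	oldest event ts: 5213.148254
 *	now ts: 5220.630837
 *	dropped events: 0
 *	read events: 1024
 */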
5718
5719static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005720 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005721 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005722 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005723 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005724};
5725
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005726#ifdef CONFIG_DYNAMIC_FTRACE
5727
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005728int __weak ftrace_arch_read_dyn_info(char *buf, int size)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005729{
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005730 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005731}
5732
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005733static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005734tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005735 size_t cnt, loff_t *ppos)
5736{
Steven Rostedta26a2a22008-10-31 00:03:22 -04005737 static char ftrace_dyn_info_buffer[1024];
5738 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005739 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005740 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04005741 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005742 int r;
5743
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005744 mutex_lock(&dyn_info_mutex);
5745 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005746
Steven Rostedta26a2a22008-10-31 00:03:22 -04005747 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005748 buf[r++] = '\n';
5749
5750 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5751
5752 mutex_unlock(&dyn_info_mutex);
5753
5754 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005755}
5756
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005757static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005758 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005759 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005760 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005761};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005762#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005763
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005764#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5765static void
5766ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005767{
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005768 tracing_snapshot();
5769}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005770
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005771static void
5772ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5773{
5774 unsigned long *count = (long *)data;
5775
5776 if (!*count)
5777 return;
5778
5779 if (*count != -1)
5780 (*count)--;
5781
5782 tracing_snapshot();
5783}
5784
5785static int
5786ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5787 struct ftrace_probe_ops *ops, void *data)
5788{
5789 long count = (long)data;
5790
5791 seq_printf(m, "%ps:", (void *)ip);
5792
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01005793 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005794
5795 if (count == -1)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01005796 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005797 else
5798 seq_printf(m, ":count=%ld\n", count);
5799
5800 return 0;
5801}
5802
5803static struct ftrace_probe_ops snapshot_probe_ops = {
5804 .func = ftrace_snapshot,
5805 .print = ftrace_snapshot_print,
5806};
5807
5808static struct ftrace_probe_ops snapshot_count_probe_ops = {
5809 .func = ftrace_count_snapshot,
5810 .print = ftrace_snapshot_print,
5811};
5812
5813static int
5814ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5815 char *glob, char *cmd, char *param, int enable)
5816{
5817 struct ftrace_probe_ops *ops;
5818 void *count = (void *)-1;
5819 char *number;
5820 int ret;
5821
5822 /* hash funcs only work with set_ftrace_filter */
5823 if (!enable)
5824 return -EINVAL;
5825
5826 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5827
5828 if (glob[0] == '!') {
5829 unregister_ftrace_function_probe_func(glob+1, ops);
5830 return 0;
5831 }
5832
5833 if (!param)
5834 goto out_reg;
5835
5836 number = strsep(&param, ":");
5837
5838 if (!strlen(number))
5839 goto out_reg;
5840
5841 /*
5842 * We use the callback data field (which is a pointer)
5843 * as our counter.
5844 */
5845 ret = kstrtoul(number, 0, (unsigned long *)&count);
5846 if (ret)
5847 return ret;
5848
5849 out_reg:
5850 ret = register_ftrace_function_probe(glob, ops, count);
5851
5852 if (ret >= 0)
5853 alloc_snapshot(&global_trace);
5854
5855 return ret < 0 ? ret : 0;
5856}
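
/*
 * The parser above implements the "snapshot" command of
 * set_ftrace_filter. Usage sketch (the function name is arbitrary):
 *
 *	echo 'schedule:snapshot' > set_ftrace_filter	 # every hit
 *	echo 'schedule:snapshot:5' > set_ftrace_filter	 # first 5 hits
 *	echo '!schedule:snapshot:0' >> set_ftrace_filter # unregister
 *
 * A count such as 5 is parsed by kstrtoul() into the probe's data
 * pointer and counted down in ftrace_count_snapshot() until it hits
 * zero.
 */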
5857
5858static struct ftrace_func_command ftrace_snapshot_cmd = {
5859 .name = "snapshot",
5860 .func = ftrace_trace_snapshot_callback,
5861};
5862
Tom Zanussi38de93a2013-10-24 08:34:18 -05005863static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005864{
5865 return register_ftrace_command(&ftrace_snapshot_cmd);
5866}
5867#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05005868static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005869#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005870
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005871struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005872{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005873 if (tr->dir)
5874 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005875
Frederic Weisbecker3e1f60b2009-03-22 23:10:45 +01005876 if (!debugfs_initialized())
5877 return NULL;
5878
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005879 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5880 tr->dir = debugfs_create_dir("tracing", NULL);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005881
zhangwei(Jovi)687c8782013-03-11 15:13:29 +08005882 if (!tr->dir)
5883 pr_warn_once("Could not create debugfs directory 'tracing'\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005884
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005885 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005886}
5887
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005888struct dentry *tracing_init_dentry(void)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005889{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005890 return tracing_init_dentry_tr(&global_trace);
5891}
5892
5893static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5894{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005895 struct dentry *d_tracer;
5896
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005897 if (tr->percpu_dir)
5898 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005899
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005900 d_tracer = tracing_init_dentry_tr(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005901 if (!d_tracer)
5902 return NULL;
5903
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005904 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005905
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005906 WARN_ONCE(!tr->percpu_dir,
5907 "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005908
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005909 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005910}
5911
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005912static struct dentry *
5913trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5914 void *data, long cpu, const struct file_operations *fops)
5915{
5916 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5917
5918 if (ret) /* See tracing_get_cpu() */
5919 ret->d_inode->i_cdev = (void *)(cpu + 1);
5920 return ret;
5921}
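
/*
 * The cpu + 1 stored in i_cdev above is how per-cpu files remember
 * their CPU. A sketch of the decode side, assuming tracing_get_cpu()
 * (defined elsewhere) mirrors this encoding:
 *
 *	if (inode->i_cdev)
 *		return (long)inode->i_cdev - 1;
 *	return RING_BUFFER_ALL_CPUS;
 *
 * The +1 keeps cpu 0 distinguishable from a NULL i_cdev, which the
 * instance-wide files leave unset.
 */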
5922
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005923static void
5924tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005925{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005926 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005927 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04005928 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005929
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09005930 if (!d_percpu)
5931 return;
5932
Steven Rostedtdd49a382010-10-20 21:51:26 -04005933 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005934 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5935 if (!d_cpu) {
5936 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5937 return;
5938 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005939
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005940 /* per cpu trace_pipe */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005941 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02005942 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005943
5944 /* per cpu trace */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005945 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005946 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04005947
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005948 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005949 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005950
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005951 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005952 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005953
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005954 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005955 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005956
5957#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005958 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005959 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005960
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005961 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005962 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005963#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005964}
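
/*
 * After the calls above, each CPU ends up with a directory roughly
 * like this (the snapshot files only with CONFIG_TRACER_SNAPSHOT):
 *
 *	/sys/kernel/debug/tracing/per_cpu/cpu0/
 *		trace  trace_pipe  trace_pipe_raw  stats
 *		buffer_size_kb  snapshot  snapshot_raw
 */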

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}


static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};
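
/*
 * Illustrative usage (assuming debugfs at /sys/kernel/debug): each
 * tracer-specific option becomes a file under "options/" that reads
 * back "0\n" or "1\n" and accepts only 0 or 1, e.g. for a hypothetical
 * option <opt>:
 *
 *	cat /sys/kernel/debug/tracing/options/<opt>
 *	echo 1 > /sys/kernel/debug/tracing/options/<opt>
 */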

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};
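
/*
 * Note the difference from trace_options_fops above: for the core
 * options the file's private_data is the bit index into trace_flags,
 * not a trace_option_dentry, so these files exist even when no tracer
 * is active.  Illustrative usage (assuming debugfs at /sys/kernel/debug):
 *
 *	echo 0 > /sys/kernel/debug/tracing/options/sym-addr
 */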

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}


static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}
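
/*
 * Illustrative sketch of the input create_trace_option_files() consumes
 * (a hypothetical tracer, not defined in this file): a tracer publishes
 * its options through tracer_flags/tracer_opt, and each named entry
 * becomes an "options/<name>" file:
 *
 *	static struct tracer_opt sample_opts[] = {
 *		{ TRACER_OPT(sample-verbose, 0x1) },
 *		{ }			// .name == NULL ends the scan above
 *	};
 *
 *	static struct tracer_flags sample_flags = {
 *		.val  = 0,
 *		.opts = sample_opts,
 *	};
 */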

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++)
		debugfs_remove(topts[cnt].entry);

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
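
/*
 * Illustrative usage: rb_simple_fops backs the per-instance "tracing_on"
 * file created in init_tracer_debugfs() below; writes toggle recording
 * (and run the current tracer's start/stop hooks) without tearing
 * anything down.  Assuming debugfs at /sys/kernel/debug:
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on	(pause recording)
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on	(resume recording)
 */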

struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
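
/*
 * Illustrative note: with CONFIG_TRACER_MAX_TRACE but no boot-time
 * snapshot request, the max/snapshot buffer above is allocated with a
 * token size of 1 and is only expanded when a snapshot is actually
 * used, e.g. (assuming debugfs at /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/tracing/snapshot
 */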

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
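
/*
 * Illustrative usage: instances are created and removed from userspace
 * with mkdir/rmdir on the "instances" directory (wired up through
 * instance_mkdir()/instance_rmdir() below).  Assuming debugfs at
 * /sys/kernel/debug:
 *
 *	mkdir /sys/kernel/debug/tracing/instances/foo
 *	rmdir /sys/kernel/debug/tracing/instances/foo	(-EBUSY while in use)
 */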

static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory cannot be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_remove_recursive() will
	 * also take the mutex. As the instances directory cannot be
	 * destroyed or changed in any other way, it is safe to unlock it,
	 * and let the dentry try. If two users try to remove the same dir
	 * at the same time, then instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}

static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}
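
/*
 * Illustrative note: everything registered above lives under the
 * "tracing" directory of debugfs.  If debugfs is not mounted yet, a
 * typical setup is:
 *
 *	mount -t debugfs nodev /sys/kernel/debug
 *	ls /sys/kernel/debug/tracing
 */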

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.next		= NULL,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is limited to a maximum of 1024 bytes; we really don't need
 * it that big. Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero-terminated, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We read all that we can,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
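
/*
 * Illustrative usage: ftrace_dump() is normally reached through the
 * panic/die notifiers above when the kernel is booted with the
 * ftrace_dump_on_oops parameter (ftrace_dump_on_oops=orig_cpu dumps
 * only the oopsing CPU's buffer), and it can also be triggered by
 * hand through sysrq-z:
 *
 *	echo z > /proc/sysrq-trigger
 */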

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;


	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

void __init trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
	}
	tracer_alloc_buffers();
	init_ftrace_syscalls();
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The name of the default boot-up tracer lives in an init
	 * section. This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);