/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;
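
/*
 * Illustrative usage sketch (not part of this file): the two ways the
 * comment above describes to enable the dump, assuming the usual paths:
 *
 *	ftrace_dump_on_oops=orig_cpu			(kernel command line)
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops	(at run time)
 */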

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
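
/*
 * A hedged usage sketch (illustrative only): trace_array_get() and
 * trace_array_put() pair up like any other reference count, keeping a
 * trace_array alive while, e.g., an open file handle still refers to it:
 *
 *	if (trace_array_get(tr) < 0)
 *		return -ENODEV;		(tr was removed from the list)
 *	...use tr...
 *	trace_array_put(tr);
 */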

int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) the page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different per-cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
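
/*
 * A minimal sketch of how the primitives above are meant to be used
 * (illustrative only). Consumers of different per-cpu buffers may run
 * concurrently, while a RING_BUFFER_ALL_CPUS holder excludes them all:
 *
 *	trace_access_lock(cpu);
 *	...consume events from the @cpu ring buffer...
 *	trace_access_unlock(cpu);
 */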

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
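
/*
 * Callers normally do not invoke __trace_puts() directly. A hedged
 * usage sketch via the trace_puts() macro (declared in linux/kernel.h
 * in kernels of this vintage), which picks __trace_bputs() for string
 * literals and __trace_puts() otherwise:
 *
 *	trace_puts("reached the slow path\n");
 */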

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
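
/*
 * Putting the snapshot API together, an illustrative sketch (not part
 * of this file; snapshot_ready and hit_condition() are made-up names):
 *
 *	if (tracing_alloc_snapshot() == 0)	(setup: may sleep)
 *		snapshot_ready = true;
 *	...
 *	if (snapshot_ready && hit_condition())
 *		tracing_snapshot();		(swaps live and spare buffers)
 */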
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
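
/*
 * Sketch of the common debugging pattern built on these two calls
 * (illustrative only; something_went_wrong() is a made-up check):
 *
 *	if (something_went_wrong())
 *		tracing_off();
 *
 * The ring buffers then keep the events leading up to the problem, can
 * be read at leisure from the tracing directory, and re-armed with
 * tracing_on().
 */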

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	ARCH_TRACE_CLOCKS
};
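
/*
 * The names above are what user space selects through the trace_clock
 * file. An illustrative session, assuming the usual debugfs mount:
 *
 *	cat /sys/kernel/debug/tracing/trace_clock
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * "local" is the fastest but is not guaranteed to be monotonic across
 * CPUs; "global" is ordered across CPUs at some extra cost.
 */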

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
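
/*
 * A hedged sketch of how a file_operations .write handler typically
 * drives trace_get_user() (my_parser and my_handle_token() are made-up
 * names for illustration):
 *
 *	ret = trace_get_user(&my_parser, ubuf, cnt, ppos);
 *	if (ret >= 0 && trace_parser_loaded(&my_parser))
 *		my_handle_token(my_parser.buffer);
 *
 * Each call extracts at most one space-separated token, and
 * parser->cont carries a partial token across consecutive writes.
 */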

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break them. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
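
/*
 * A minimal registration sketch (illustrative only; "mini" and its
 * callbacks are made-up names, and real tracers fill in many more
 * struct tracer fields):
 *
 *	static struct tracer mini_tracer __read_mostly = {
 *		.name	= "mini",
 *		.init	= mini_tracer_init,
 *		.reset	= mini_tracer_reset,
 *	};
 *
 *	static int __init init_mini_tracer(void)
 *	{
 *		return register_tracer(&mini_tracer);
 *	}
 *	core_initcall(init_mini_tracer);
 */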

void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}
1292
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001293#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001294#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001295static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001296struct saved_cmdlines_buffer {
1297 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1298 unsigned *map_cmdline_to_pid;
1299 unsigned cmdline_num;
1300 int cmdline_idx;
1301 char *saved_cmdlines;
1302};
1303static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001304
Steven Rostedt25b0b442008-05-12 21:21:00 +02001305/* temporarily disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001306static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001307
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001308static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001309{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001310 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1311}
1312
1313static inline void set_cmdline(int idx, const char *cmdline)
1314{
1315 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1316}
1317
1318static int allocate_cmdlines_buffer(unsigned int val,
1319 struct saved_cmdlines_buffer *s)
1320{
1321 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1322 GFP_KERNEL);
1323 if (!s->map_cmdline_to_pid)
1324 return -ENOMEM;
1325
1326 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1327 if (!s->saved_cmdlines) {
1328 kfree(s->map_cmdline_to_pid);
1329 return -ENOMEM;
1330 }
1331
1332 s->cmdline_idx = 0;
1333 s->cmdline_num = val;
1334 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1335 sizeof(s->map_pid_to_cmdline));
1336 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1337 val * sizeof(*s->map_cmdline_to_pid));
1338
1339 return 0;
1340}
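
/*
 * Sizing sketch for the allocation above, assuming the defaults:
 * SAVED_CMDLINES_DEFAULT (128) entries cost 128 * sizeof(unsigned)
 * bytes for map_cmdline_to_pid plus 128 * TASK_COMM_LEN (16) =
 * 2048 bytes for the comm text, while the (PID_MAX_DEFAULT+1)-entry
 * map_pid_to_cmdline array is embedded in the struct itself.
 */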
1341
1342static int trace_create_savedcmd(void)
1343{
1344 int ret;
1345
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001346 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001347 if (!savedcmd)
1348 return -ENOMEM;
1349
1350 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1351 if (ret < 0) {
1352 kfree(savedcmd);
1353 savedcmd = NULL;
1354 return -ENOMEM;
1355 }
1356
1357 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001358}
1359
Carsten Emdeb5130b12009-09-13 01:43:07 +02001360int is_tracing_stopped(void)
1361{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001362 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001363}
1364
Steven Rostedt0f048702008-11-05 16:05:44 -05001365/**
1366 * tracing_start - quick start of the tracer
1367 *
1368 * If tracing is enabled but was stopped by tracing_stop,
1369 * this will start the tracer back up.
1370 */
1371void tracing_start(void)
1372{
1373 struct ring_buffer *buffer;
1374 unsigned long flags;
1375
1376 if (tracing_disabled)
1377 return;
1378
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001379 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1380 if (--global_trace.stop_count) {
1381 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001382 /* Someone screwed up their debugging */
1383 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001384 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001385 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001386 goto out;
1387 }
1388
Steven Rostedta2f80712010-03-12 19:56:00 -05001389 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001390 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001391
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001392 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001393 if (buffer)
1394 ring_buffer_record_enable(buffer);
1395
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001396#ifdef CONFIG_TRACER_MAX_TRACE
1397 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001398 if (buffer)
1399 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001400#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001401
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001402 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001403
Steven Rostedt0f048702008-11-05 16:05:44 -05001404 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001405 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1406}
1407
1408static void tracing_start_tr(struct trace_array *tr)
1409{
1410 struct ring_buffer *buffer;
1411 unsigned long flags;
1412
1413 if (tracing_disabled)
1414 return;
1415
1416 /* If global, we need to also start the max tracer */
1417 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1418 return tracing_start();
1419
1420 raw_spin_lock_irqsave(&tr->start_lock, flags);
1421
1422 if (--tr->stop_count) {
1423 if (tr->stop_count < 0) {
1424 /* Someone screwed up their debugging */
1425 WARN_ON_ONCE(1);
1426 tr->stop_count = 0;
1427 }
1428 goto out;
1429 }
1430
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001431 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001432 if (buffer)
1433 ring_buffer_record_enable(buffer);
1434
1435 out:
1436 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001437}
1438
1439/**
1440 * tracing_stop - quick stop of the tracer
1441 *
1442 * Lightweight way to stop tracing. Use in conjunction with
1443 * tracing_start.
1444 */
1445void tracing_stop(void)
1446{
1447 struct ring_buffer *buffer;
1448 unsigned long flags;
1449
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001450 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1451 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001452 goto out;
1453
Steven Rostedta2f80712010-03-12 19:56:00 -05001454 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001455 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001456
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001457 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001458 if (buffer)
1459 ring_buffer_record_disable(buffer);
1460
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001461#ifdef CONFIG_TRACER_MAX_TRACE
1462 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001463 if (buffer)
1464 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001465#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001466
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001467 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001468
Steven Rostedt0f048702008-11-05 16:05:44 -05001469 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001470 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1471}
1472
1473static void tracing_stop_tr(struct trace_array *tr)
1474{
1475 struct ring_buffer *buffer;
1476 unsigned long flags;
1477
1478 /* If global, we need to also stop the max tracer */
1479 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1480 return tracing_stop();
1481
1482 raw_spin_lock_irqsave(&tr->start_lock, flags);
1483 if (tr->stop_count++)
1484 goto out;
1485
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001486 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001487 if (buffer)
1488 ring_buffer_record_disable(buffer);
1489
1490 out:
1491 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001492}
1493
Ingo Molnare309b412008-05-12 21:20:51 +02001494void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001495
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001496static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001497{
Carsten Emdea635cf02009-03-18 09:00:41 +01001498 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001499
1500 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001501 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001502
1503 /*
1504 * It's not the end of the world if we don't get
1505 * the lock, but we also don't want to spin
1506 * nor do we want to disable interrupts,
1507 * so if we miss here, then better luck next time.
1508 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001509 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001510 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001511
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001512 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001513 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001514 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001515
Carsten Emdea635cf02009-03-18 09:00:41 +01001516 /*
1517 * Check whether the cmdline buffer at idx has a pid
1518 * mapped. We are going to overwrite that entry so we
1519 * need to clear the map_pid_to_cmdline. Otherwise we
1520 * would read the new comm for the old pid.
1521 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001522 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001523 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001524 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001525
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001526 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1527 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001528
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001529 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001530 }
1531
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001532 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001533
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001534 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001535
1536 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001537}
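
/*
 * A worked example of the eviction above (hypothetical pids): if
 * slot idx currently maps pid 1234 and we now save pid 5678 into
 * it, map_pid_to_cmdline[1234] is first reset to NO_CMDLINE_MAP,
 * so a later lookup of pid 1234 reports "<...>" rather than the
 * comm that 5678 just stored in the slot.
 */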
1538
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001539static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001540{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001541 unsigned map;
1542
Steven Rostedt4ca530852009-03-16 19:20:15 -04001543 if (!pid) {
1544 strcpy(comm, "<idle>");
1545 return;
1546 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001547
Steven Rostedt74bf4072010-01-25 15:11:53 -05001548 if (WARN_ON_ONCE(pid < 0)) {
1549 strcpy(comm, "<XXX>");
1550 return;
1551 }
1552
Steven Rostedt4ca530852009-03-16 19:20:15 -04001553 if (pid > PID_MAX_DEFAULT) {
1554 strcpy(comm, "<...>");
1555 return;
1556 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001557
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001558 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001559 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001560 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001561 else
1562 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001563}
1564
1565void trace_find_cmdline(int pid, char comm[])
1566{
1567 preempt_disable();
1568 arch_spin_lock(&trace_cmdline_lock);
1569
1570 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001571
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001572 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001573 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001574}
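
/*
 * Typical use from output code, as a sketch (the buffer must be
 * at least TASK_COMM_LEN bytes; 's' is a trace_seq):
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *	trace_seq_printf(s, "%16s-%-5d", comm, entry->pid);
 */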
1575
Ingo Molnare309b412008-05-12 21:20:51 +02001576void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001577{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001578 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001579 return;
1580
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001581 if (!__this_cpu_read(trace_cmdline_save))
1582 return;
1583
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001584 if (trace_save_cmdline(tsk))
1585 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001586}
1587
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001588void
Steven Rostedt38697052008-10-01 13:14:09 -04001589tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1590 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001591{
1592 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001593
Steven Rostedt777e2082008-09-29 23:02:42 -04001594 entry->preempt_count = pc & 0xff;
1595 entry->pid = (tsk) ? tsk->pid : 0;
1596 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001597#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001598 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001599#else
1600 TRACE_FLAG_IRQS_NOSUPPORT |
1601#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001602 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1603 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001604 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1605 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001606}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001607EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001608
Steven Rostedte77405a2009-09-02 14:17:06 -04001609struct ring_buffer_event *
1610trace_buffer_lock_reserve(struct ring_buffer *buffer,
1611 int type,
1612 unsigned long len,
1613 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001614{
1615 struct ring_buffer_event *event;
1616
Steven Rostedte77405a2009-09-02 14:17:06 -04001617 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001618 if (event != NULL) {
1619 struct trace_entry *ent = ring_buffer_event_data(event);
1620
1621 tracing_generic_entry_update(ent, flags, pc);
1622 ent->type = type;
1623 }
1624
1625 return event;
1626}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001627
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001628void
1629__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1630{
1631 __this_cpu_write(trace_cmdline_save, true);
1632 ring_buffer_unlock_commit(buffer, event);
1633}
1634
Steven Rostedte77405a2009-09-02 14:17:06 -04001635static inline void
1636__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1637 struct ring_buffer_event *event,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001638 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001639{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001640 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001641
Steven Rostedte77405a2009-09-02 14:17:06 -04001642 ftrace_trace_stack(buffer, flags, 6, pc);
1643 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001644}
1645
Steven Rostedte77405a2009-09-02 14:17:06 -04001646void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1647 struct ring_buffer_event *event,
1648 unsigned long flags, int pc)
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001649{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001650 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001651}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001652EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001653
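/*
 * The usual reserve/fill/commit cycle, as a minimal sketch
 * (declarations omitted; compare trace_function() below for a
 * real in-tree instance of the pattern):
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN,
 *					  sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->ip = ip;
 *	trace_buffer_unlock_commit(buffer, event, flags, pc);
 */
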
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001654static struct ring_buffer *temp_buffer;
1655
Steven Rostedtef5580d2009-02-27 19:38:04 -05001656struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001657trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1658 struct ftrace_event_file *ftrace_file,
1659 int type, unsigned long len,
1660 unsigned long flags, int pc)
1661{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001662 struct ring_buffer_event *entry;
1663
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001664 *current_rb = ftrace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001665 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001666 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001667 /*
1668 * If tracing is off, but we have triggers enabled
1669 * we still need to look at the event data. Use the temp_buffer
1670 * to store the trace event for the trigger to use. It's recursion
1671 * safe and will not be recorded anywhere.
1672 */
1673 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1674 *current_rb = temp_buffer;
1675 entry = trace_buffer_lock_reserve(*current_rb,
1676 type, len, flags, pc);
1677 }
1678 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001679}
1680EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1681
1682struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001683trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1684 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001685 unsigned long flags, int pc)
1686{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001687 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001688 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001689 type, len, flags, pc);
1690}
Steven Rostedt94487d62009-05-05 19:22:53 -04001691EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001692
Steven Rostedte77405a2009-09-02 14:17:06 -04001693void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1694 struct ring_buffer_event *event,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001695 unsigned long flags, int pc)
1696{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001697 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001698}
Steven Rostedt94487d62009-05-05 19:22:53 -04001699EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001700
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001701void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1702 struct ring_buffer_event *event,
1703 unsigned long flags, int pc,
1704 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001705{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001706 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001707
1708 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1709 ftrace_trace_userstack(buffer, flags, pc);
1710}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001711EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001712
Steven Rostedte77405a2009-09-02 14:17:06 -04001713void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1714 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001715{
Steven Rostedte77405a2009-09-02 14:17:06 -04001716 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001717}
Steven Rostedt12acd472009-04-17 16:01:56 -04001718EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001719
Ingo Molnare309b412008-05-12 21:20:51 +02001720void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001721trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001722 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1723 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001724{
Tom Zanussie1112b42009-03-31 00:48:49 -05001725 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001726 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001727 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001728 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001729
Steven Rostedtd7690412008-10-01 00:29:53 -04001730 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001731 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001732 return;
1733
Steven Rostedte77405a2009-09-02 14:17:06 -04001734 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001735 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001736 if (!event)
1737 return;
1738 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001739 entry->ip = ip;
1740 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001741
Tom Zanussif306cc82013-10-24 08:34:17 -05001742 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001743 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001744}
1745
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001746#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001747
1748#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1749struct ftrace_stack {
1750 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1751};
1752
1753static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1754static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1755
Steven Rostedte77405a2009-09-02 14:17:06 -04001756static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001757 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001758 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001759{
Tom Zanussie1112b42009-03-31 00:48:49 -05001760 struct ftrace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001761 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001762 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001763 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001764 int use_stack;
1765 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001766
1767 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001768 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001769
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001770 /*
1771 * Since events can happen in NMIs there's no safe way to
1772 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1773 * or NMI comes in, it will just have to use the default
1774 * FTRACE_STACK_SIZE.
1775 */
1776 preempt_disable_notrace();
1777
Shan Wei82146522012-11-19 13:21:01 +08001778 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001779 /*
1780 * We don't need any atomic variables, just a barrier.
1781 * If an interrupt comes in, we don't care, because it would
1782 * have exited and put the counter back to what we want.
1783 * We just need a barrier to keep gcc from moving things
1784 * around.
1785 */
1786 barrier();
1787 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05001788 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001789 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1790
1791 if (regs)
1792 save_stack_trace_regs(regs, &trace);
1793 else
1794 save_stack_trace(&trace);
1795
1796 if (trace.nr_entries > size)
1797 size = trace.nr_entries;
1798 } else
1799 /* From now on, use_stack is a boolean */
1800 use_stack = 0;
1801
1802 size *= sizeof(unsigned long);
1803
1804 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1805 sizeof(*entry) + size, flags, pc);
1806 if (!event)
1807 goto out;
1808 entry = ring_buffer_event_data(event);
1809
1810 memset(&entry->caller, 0, size);
1811
1812 if (use_stack)
1813 memcpy(&entry->caller, trace.entries,
1814 trace.nr_entries * sizeof(unsigned long));
1815 else {
1816 trace.max_entries = FTRACE_STACK_ENTRIES;
1817 trace.entries = entry->caller;
1818 if (regs)
1819 save_stack_trace_regs(regs, &trace);
1820 else
1821 save_stack_trace(&trace);
1822 }
1823
1824 entry->size = trace.nr_entries;
1825
Tom Zanussif306cc82013-10-24 08:34:17 -05001826 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001827 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001828
1829 out:
1830 /* Again, don't let gcc optimize things here */
1831 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001832 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001833 preempt_enable_notrace();
1834
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001835}
1836
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001837void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1838 int skip, int pc, struct pt_regs *regs)
1839{
1840 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1841 return;
1842
1843 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1844}
1845
Steven Rostedte77405a2009-09-02 14:17:06 -04001846void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1847 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001848{
1849 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1850 return;
1851
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001852 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001853}
1854
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001855void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1856 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001857{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001858 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001859}
1860
Steven Rostedt03889382009-12-11 09:48:22 -05001861/**
1862 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001863 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001864 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001865void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001866{
1867 unsigned long flags;
1868
1869 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001870 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001871
1872 local_save_flags(flags);
1873
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001874 /*
1875	 * Skip 3 more; that seems to get us to the caller of
1876	 * this function.
1877 */
1878 skip += 3;
1879 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1880 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001881}
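
/*
 * Example use while debugging, as a sketch: a bare
 *
 *	trace_dump_stack(0);
 *
 * dropped into a code path records that path's kernel stack as a
 * stack event in the trace buffer.
 */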
1882
Steven Rostedt91e86e52010-11-10 12:56:12 +01001883static DEFINE_PER_CPU(int, user_stack_count);
1884
Steven Rostedte77405a2009-09-02 14:17:06 -04001885void
1886ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001887{
Tom Zanussie1112b42009-03-31 00:48:49 -05001888 struct ftrace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001889 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001890 struct userstack_entry *entry;
1891 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001892
1893 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1894 return;
1895
Steven Rostedtb6345872010-03-12 20:03:30 -05001896 /*
1897	 * NMIs cannot handle page faults, even with fixups.
1898	 * Saving the user stack can (and often does) fault.
1899 */
1900 if (unlikely(in_nmi()))
1901 return;
1902
Steven Rostedt91e86e52010-11-10 12:56:12 +01001903 /*
1904 * prevent recursion, since the user stack tracing may
1905 * trigger other kernel events.
1906 */
1907 preempt_disable();
1908 if (__this_cpu_read(user_stack_count))
1909 goto out;
1910
1911 __this_cpu_inc(user_stack_count);
1912
Steven Rostedte77405a2009-09-02 14:17:06 -04001913 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001914 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001915 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001916 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001917 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001918
Steven Rostedt48659d32009-09-11 11:36:23 -04001919 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001920 memset(&entry->caller, 0, sizeof(entry->caller));
1921
1922 trace.nr_entries = 0;
1923 trace.max_entries = FTRACE_STACK_ENTRIES;
1924 trace.skip = 0;
1925 trace.entries = entry->caller;
1926
1927 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001928 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001929 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001930
Li Zefan1dbd1952010-12-09 15:47:56 +08001931 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001932 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001933 out:
1934 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001935}
1936
Hannes Eder4fd27352009-02-10 19:44:12 +01001937#ifdef UNUSED
1938static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001939{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001940 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001941}
Hannes Eder4fd27352009-02-10 19:44:12 +01001942#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001943
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001944#endif /* CONFIG_STACKTRACE */
1945
Steven Rostedt07d777f2011-09-22 14:01:55 -04001946/* created for use with alloc_percpu */
1947struct trace_buffer_struct {
1948 char buffer[TRACE_BUF_SIZE];
1949};
1950
1951static struct trace_buffer_struct *trace_percpu_buffer;
1952static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1953static struct trace_buffer_struct *trace_percpu_irq_buffer;
1954static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1955
1956/*
1957 * The buffer used is dependent on the context. There is a per cpu
1958 * buffer for normal context, softirq context, hard irq context and
1959 * for NMI context. This allows for lockless recording.
1960 *
1961 * Note, if the buffers failed to be allocated, then this returns NULL
1962 */
1963static char *get_trace_buf(void)
1964{
1965 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001966
1967 /*
1968 * If we have allocated per cpu buffers, then we do not
1969 * need to do any locking.
1970 */
1971 if (in_nmi())
1972 percpu_buffer = trace_percpu_nmi_buffer;
1973 else if (in_irq())
1974 percpu_buffer = trace_percpu_irq_buffer;
1975 else if (in_softirq())
1976 percpu_buffer = trace_percpu_sirq_buffer;
1977 else
1978 percpu_buffer = trace_percpu_buffer;
1979
1980 if (!percpu_buffer)
1981 return NULL;
1982
Shan Weid8a03492012-11-13 09:53:04 +08001983 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001984}
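
/*
 * Why four buffers per cpu suffice, as a sketch: the contexts
 * strictly nest on a given cpu, so each level that interrupts a
 * lower one picks a different buffer and never scribbles over
 * the one in use below it:
 *
 *	process context:	trace_percpu_buffer
 *	  softirq arrives:	trace_percpu_sirq_buffer
 *	    irq arrives:	trace_percpu_irq_buffer
 *	      NMI arrives:	trace_percpu_nmi_buffer
 */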
1985
1986static int alloc_percpu_trace_buffer(void)
1987{
1988 struct trace_buffer_struct *buffers;
1989 struct trace_buffer_struct *sirq_buffers;
1990 struct trace_buffer_struct *irq_buffers;
1991 struct trace_buffer_struct *nmi_buffers;
1992
1993 buffers = alloc_percpu(struct trace_buffer_struct);
1994 if (!buffers)
1995 goto err_warn;
1996
1997 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1998 if (!sirq_buffers)
1999 goto err_sirq;
2000
2001 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2002 if (!irq_buffers)
2003 goto err_irq;
2004
2005 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2006 if (!nmi_buffers)
2007 goto err_nmi;
2008
2009 trace_percpu_buffer = buffers;
2010 trace_percpu_sirq_buffer = sirq_buffers;
2011 trace_percpu_irq_buffer = irq_buffers;
2012 trace_percpu_nmi_buffer = nmi_buffers;
2013
2014 return 0;
2015
2016 err_nmi:
2017 free_percpu(irq_buffers);
2018 err_irq:
2019 free_percpu(sirq_buffers);
2020 err_sirq:
2021 free_percpu(buffers);
2022 err_warn:
2023 WARN(1, "Could not allocate percpu trace_printk buffer");
2024 return -ENOMEM;
2025}
2026
Steven Rostedt81698832012-10-11 10:15:05 -04002027static int buffers_allocated;
2028
Steven Rostedt07d777f2011-09-22 14:01:55 -04002029void trace_printk_init_buffers(void)
2030{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002031 if (buffers_allocated)
2032 return;
2033
2034 if (alloc_percpu_trace_buffer())
2035 return;
2036
Steven Rostedt2184db42014-05-28 13:14:40 -04002037 /* trace_printk() is for debug use only. Don't use it in production. */
2038
Borislav Petkov69a1c992015-01-27 17:17:20 +01002039 pr_warning("\n");
2040 pr_warning("**********************************************************\n");
Steven Rostedt2184db42014-05-28 13:14:40 -04002041 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2042 pr_warning("** **\n");
2043 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2044 pr_warning("** **\n");
2045 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
Frans Klavereff264e2014-11-07 15:53:44 +01002046 pr_warning("** unsafe for production use. **\n");
Steven Rostedt2184db42014-05-28 13:14:40 -04002047 pr_warning("** **\n");
2048 pr_warning("** If you see this message and you are not debugging **\n");
2049 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2050 pr_warning("** **\n");
2051 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2052 pr_warning("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002053
Steven Rostedtb382ede62012-10-10 21:44:34 -04002054	/* Expand the buffers to their set size */
2055 tracing_update_buffers();
2056
Steven Rostedt07d777f2011-09-22 14:01:55 -04002057 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002058
2059 /*
2060 * trace_printk_init_buffers() can be called by modules.
2061 * If that happens, then we need to start cmdline recording
2062 * directly here. If the global_trace.buffer is already
2063 * allocated here, then this was called by module code.
2064 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002065 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002066 tracing_start_cmdline_record();
2067}
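
/*
 * For reference, a sketch of the debug-only use that lands here
 * (per the comment above, modules using trace_printk() reach this
 * at load time; 'val' is hypothetical):
 *
 *	trace_printk("reached %s:%d, val=%d\n",
 *		     __func__, __LINE__, val);
 */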
2068
2069void trace_printk_start_comm(void)
2070{
2071 /* Start tracing comms if trace printk is set */
2072 if (!buffers_allocated)
2073 return;
2074 tracing_start_cmdline_record();
2075}
2076
2077static void trace_printk_start_stop_comm(int enabled)
2078{
2079 if (!buffers_allocated)
2080 return;
2081
2082 if (enabled)
2083 tracing_start_cmdline_record();
2084 else
2085 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002086}
2087
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002088/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002089 * trace_vbprintk - write a binary message to the tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002090 *
2091 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002092int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002093{
Tom Zanussie1112b42009-03-31 00:48:49 -05002094 struct ftrace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002095 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002096 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002097 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002098 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002099 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002100 char *tbuffer;
2101 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002102
2103 if (unlikely(tracing_selftest_running || tracing_disabled))
2104 return 0;
2105
2106 /* Don't pollute graph traces with trace_vprintk internals */
2107 pause_graph_tracing();
2108
2109 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002110 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002111
Steven Rostedt07d777f2011-09-22 14:01:55 -04002112 tbuffer = get_trace_buf();
2113 if (!tbuffer) {
2114 len = 0;
2115 goto out;
2116 }
2117
2118 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2119
2120 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002121 goto out;
2122
Steven Rostedt07d777f2011-09-22 14:01:55 -04002123 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002124 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002125 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002126 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2127 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002128 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002129 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002130 entry = ring_buffer_event_data(event);
2131 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002132 entry->fmt = fmt;
2133
Steven Rostedt07d777f2011-09-22 14:01:55 -04002134 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002135 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002136 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05002137 ftrace_trace_stack(buffer, flags, 6, pc);
2138 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002139
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002140out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002141 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002142 unpause_graph_tracing();
2143
2144 return len;
2145}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002146EXPORT_SYMBOL_GPL(trace_vbprintk);
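
/*
 * Note on the binary format, as a sketch: the record stores just
 * the fmt pointer and the vbin_printf()-packed argument words;
 * the read side expands it late, essentially doing
 *
 *	trace_seq_bprintf(s, field->fmt, field->buf);
 *
 * so the constant format string is never copied per event.
 */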
2147
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002148static int
2149__trace_array_vprintk(struct ring_buffer *buffer,
2150 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002151{
Tom Zanussie1112b42009-03-31 00:48:49 -05002152 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002153 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002154 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002155 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002156 unsigned long flags;
2157 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002158
2159 if (tracing_disabled || tracing_selftest_running)
2160 return 0;
2161
Steven Rostedt07d777f2011-09-22 14:01:55 -04002162 /* Don't pollute graph traces with trace_vprintk internals */
2163 pause_graph_tracing();
2164
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002165 pc = preempt_count();
2166 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002167
Steven Rostedt07d777f2011-09-22 14:01:55 -04002168
2169 tbuffer = get_trace_buf();
2170 if (!tbuffer) {
2171 len = 0;
2172 goto out;
2173 }
2174
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002175 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002176
Steven Rostedt07d777f2011-09-22 14:01:55 -04002177 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002178 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002179 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002180 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002181 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002182 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002183 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002184 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002185
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002186 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05002187 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002188 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002189 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002190 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002191 out:
2192 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002193 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002194
2195 return len;
2196}
Steven Rostedt659372d2009-09-03 19:11:07 -04002197
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002198int trace_array_vprintk(struct trace_array *tr,
2199 unsigned long ip, const char *fmt, va_list args)
2200{
2201 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2202}
2203
2204int trace_array_printk(struct trace_array *tr,
2205 unsigned long ip, const char *fmt, ...)
2206{
2207 int ret;
2208 va_list ap;
2209
2210 if (!(trace_flags & TRACE_ITER_PRINTK))
2211 return 0;
2212
2213 va_start(ap, fmt);
2214 ret = trace_array_vprintk(tr, ip, fmt, ap);
2215 va_end(ap);
2216 return ret;
2217}
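
/*
 * A sketch of use against a specific trace instance (tr obtained
 * by the caller; requires the 'printk' trace option to be set;
 * 'hits' is hypothetical):
 *
 *	trace_array_printk(tr, _THIS_IP_, "probe hit %d times\n", hits);
 *
 * The varargs behave like printk().
 */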
2218
2219int trace_array_printk_buf(struct ring_buffer *buffer,
2220 unsigned long ip, const char *fmt, ...)
2221{
2222 int ret;
2223 va_list ap;
2224
2225 if (!(trace_flags & TRACE_ITER_PRINTK))
2226 return 0;
2227
2228 va_start(ap, fmt);
2229 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2230 va_end(ap);
2231 return ret;
2232}
2233
Steven Rostedt659372d2009-09-03 19:11:07 -04002234int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2235{
Steven Rostedta813a152009-10-09 01:41:35 -04002236 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002237}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002238EXPORT_SYMBOL_GPL(trace_vprintk);
2239
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002240static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002241{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002242 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2243
Steven Rostedt5a90f572008-09-03 17:42:51 -04002244 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002245 if (buf_iter)
2246 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002247}
2248
Ingo Molnare309b412008-05-12 21:20:51 +02002249static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002250peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2251 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002252{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002253 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002254 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002255
Steven Rostedtd7690412008-10-01 00:29:53 -04002256 if (buf_iter)
2257 event = ring_buffer_iter_peek(buf_iter, ts);
2258 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002259 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002260 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002261
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002262 if (event) {
2263 iter->ent_size = ring_buffer_event_length(event);
2264 return ring_buffer_event_data(event);
2265 }
2266 iter->ent_size = 0;
2267 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002268}
Steven Rostedtd7690412008-10-01 00:29:53 -04002269
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002270static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002271__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2272 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002273{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002274 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002275 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002276 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002277 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002278 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002279 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002280 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002281 int cpu;
2282
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002283 /*
2284	 * If we are in a per_cpu trace file, don't bother iterating over
2285	 * all cpus; just peek at that cpu directly.
2286 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002287 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002288 if (ring_buffer_empty_cpu(buffer, cpu_file))
2289 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002290 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002291 if (ent_cpu)
2292 *ent_cpu = cpu_file;
2293
2294 return ent;
2295 }
2296
Steven Rostedtab464282008-05-12 21:21:00 +02002297 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002298
2299 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002300 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002301
Steven Rostedtbc21b472010-03-31 19:49:26 -04002302 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002303
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02002304 /*
2305 * Pick the entry with the smallest timestamp:
2306 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002307 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002308 next = ent;
2309 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002310 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002311 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002312 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002313 }
2314 }
2315
Steven Rostedt12b5da32012-03-27 10:43:28 -04002316 iter->ent_size = next_size;
2317
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002318 if (ent_cpu)
2319 *ent_cpu = next_cpu;
2320
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002321 if (ent_ts)
2322 *ent_ts = next_ts;
2323
Steven Rostedtbc21b472010-03-31 19:49:26 -04002324 if (missing_events)
2325 *missing_events = next_lost;
2326
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002327 return next;
2328}
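
/*
 * A worked example of the merge above (hypothetical timestamps):
 * with cpu0's next entry at ts=1003 and cpu1's at ts=1001, the
 * loop picks cpu1's entry (next_cpu = 1, next_ts = 1001), so
 * repeated calls yield a single stream ordered by timestamp
 * across all the per-cpu buffers.
 */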
2329
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002330/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002331struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2332 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002333{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002334 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002335}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002336
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002337/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002338void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002339{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002340 iter->ent = __find_next_entry(iter, &iter->cpu,
2341 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002342
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002343 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002344 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002345
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002346 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002347}
2348
Ingo Molnare309b412008-05-12 21:20:51 +02002349static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002350{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002351 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002352 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002353}
2354
Ingo Molnare309b412008-05-12 21:20:51 +02002355static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002356{
2357 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002358 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002359 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002360
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002361 WARN_ON_ONCE(iter->leftover);
2362
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002363 (*pos)++;
2364
2365 /* can't go backwards */
2366 if (iter->idx > i)
2367 return NULL;
2368
2369 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002370 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002371 else
2372 ent = iter;
2373
2374 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002375 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002376
2377 iter->pos = *pos;
2378
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002379 return ent;
2380}
2381
Jason Wessel955b61e2010-08-05 09:22:23 -05002382void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002383{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002384 struct ring_buffer_event *event;
2385 struct ring_buffer_iter *buf_iter;
2386 unsigned long entries = 0;
2387 u64 ts;
2388
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002389 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002390
Steven Rostedt6d158a82012-06-27 20:46:14 -04002391 buf_iter = trace_buffer_iter(iter, cpu);
2392 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002393 return;
2394
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002395 ring_buffer_iter_reset(buf_iter);
2396
2397 /*
2398	 * With the max latency tracers, we could have the case
2399	 * that a reset never took place on a cpu. This is evident
2400	 * from the timestamp being before the start of the buffer.
2401 */
2402 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002403 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002404 break;
2405 entries++;
2406 ring_buffer_read(buf_iter, NULL);
2407 }
2408
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002409 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002410}
2411
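/*
 * Example (hypothetical numbers): if this buffer's time_start is
 * 5000 but the cpu still holds entries stamped 4990 and 4995 from
 * before the reset, the loop above skips them and records
 * skipped_entries = 2, which get_total_entries() below uses to
 * keep the header statistics consistent.
 */
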
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002412/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002413 * The current tracer is copied to avoid global locking
2414 * all around.
2415 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002416static void *s_start(struct seq_file *m, loff_t *pos)
2417{
2418 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002419 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002420 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002421 void *p = NULL;
2422 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002423 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002424
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002425 /*
2426	 * Copy the tracer to avoid using a global lock all around.
2427	 * iter->trace is a copy of current_trace, so the pointer to the
2428 * name may be used instead of a strcmp(), as iter->trace->name
2429 * will point to the same string as current_trace->name.
2430 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002431 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002432 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2433 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002434 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002435
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002436#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002437 if (iter->snapshot && iter->trace->use_max_tr)
2438 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002439#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002440
2441 if (!iter->snapshot)
2442 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002443
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002444 if (*pos != iter->pos) {
2445 iter->ent = NULL;
2446 iter->cpu = 0;
2447 iter->idx = -1;
2448
Steven Rostedtae3b5092013-01-23 15:22:59 -05002449 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002450 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002451 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002452 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002453 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002454
Lai Jiangshanac91d852010-03-02 17:54:50 +08002455 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002456 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2457 ;
2458
2459 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002460 /*
2461 * If we overflowed the seq_file before, then we want
2462 * to just reuse the trace_seq buffer again.
2463 */
2464 if (iter->leftover)
2465 p = iter;
2466 else {
2467 l = *pos - 1;
2468 p = s_next(m, p, &l);
2469 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002470 }
2471
Lai Jiangshan4f535962009-05-18 19:35:34 +08002472 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002473 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002474 return p;
2475}
2476
2477static void s_stop(struct seq_file *m, void *p)
2478{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002479 struct trace_iterator *iter = m->private;
2480
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002481#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002482 if (iter->snapshot && iter->trace->use_max_tr)
2483 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002484#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002485
2486 if (!iter->snapshot)
2487 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002488
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002489 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002490 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002491}
2492
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002493static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002494get_total_entries(struct trace_buffer *buf,
2495 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002496{
2497 unsigned long count;
2498 int cpu;
2499
2500 *total = 0;
2501 *entries = 0;
2502
2503 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002504 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002505 /*
2506 * If this buffer has skipped entries, then we hold all
2507 * entries for the trace and we need to ignore the
2508 * ones before the time stamp.
2509 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002510 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2511 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002512 /* total is the same as the entries */
2513 *total += count;
2514 } else
2515 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002516 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002517 *entries += count;
2518 }
2519}
2520
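/*
 * Worked example (illustrative, not part of the original file): if
 * CPU 0 holds 100 entries with 10 overruns and CPU 1 holds 50 with
 * none skipped, get_total_entries() yields *entries = 150 and
 * *total = 160, i.e. "total" also counts events that were written
 * but have since been overwritten. A hypothetical caller:
 *
 *	unsigned long total, entries;
 *
 *	get_total_entries(&tr->trace_buffer, &total, &entries);
 *	pr_info("%lu/%lu entries (%lu overwritten)\n",
 *		entries, total, total - entries);
 */
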
Ingo Molnare309b412008-05-12 21:20:51 +02002521static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002522{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002523 seq_puts(m, "#                  _------=> CPU#            \n"
2524 "#                 / _-----=> irqs-off        \n"
2525 "#                | / _----=> need-resched    \n"
2526 "#                || / _---=> hardirq/softirq \n"
2527 "#                ||| / _--=> preempt-depth   \n"
2528 "#                |||| /     delay            \n"
2529 "#  cmd     pid   ||||| time  |   caller      \n"
2530 "#     \\   /      |||||  \\    |   /         \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002531}
2532
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002533static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002534{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002535 unsigned long total;
2536 unsigned long entries;
2537
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002538 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002539 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2540 entries, total, num_online_cpus());
2541 seq_puts(m, "#\n");
2542}
2543
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002544static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002545{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002546 print_event_info(buf, m);
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002547 seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
2548 "#              | |       |          |         |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002549}
2550
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002551static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05002552{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002553 print_event_info(buf, m);
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002554 seq_puts(m, "#                              _-----=> irqs-off\n"
2555 "#                             / _----=> need-resched\n"
2556 "#                            | / _---=> hardirq/softirq\n"
2557 "#                            || / _--=> preempt-depth\n"
2558 "#                            ||| /     delay\n"
2559 "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
2560 "#              | |       |   ||||       |         |\n");
Steven Rostedt77271ce2011-11-17 09:34:33 -05002561}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002562
Jiri Olsa62b915f2010-04-02 19:01:22 +02002563void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002564print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2565{
2566 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002567 struct trace_buffer *buf = iter->trace_buffer;
2568 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002569 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002570 unsigned long entries;
2571 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002572 const char *name = "preemption";
2573
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002574 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002575
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002576 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002577
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002578 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002579 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002580 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002581 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002582 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002583 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002584 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002585 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002586 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002587 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002588#if defined(CONFIG_PREEMPT_NONE)
2589 "server",
2590#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2591 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002592#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002593 "preempt",
2594#else
2595 "unknown",
2596#endif
2597 /* These are reserved for later use */
2598 0, 0, 0, 0);
2599#ifdef CONFIG_SMP
2600 seq_printf(m, " #P:%d)\n", num_online_cpus());
2601#else
2602 seq_puts(m, ")\n");
2603#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002604 seq_puts(m, "# -----------------\n");
2605 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002606 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002607 data->comm, data->pid,
2608 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002609 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002610 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002611
2612 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002613 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002614 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2615 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002616 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002617 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2618 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002619 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002620 }
2621
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002622 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002623}
2624
Steven Rostedta3097202008-11-07 22:36:02 -05002625static void test_cpu_buff_start(struct trace_iterator *iter)
2626{
2627 struct trace_seq *s = &iter->seq;
2628
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002629 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2630 return;
2631
2632 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2633 return;
2634
Rusty Russell44623442009-01-01 10:12:23 +10302635 if (cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05002636 return;
2637
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002638 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002639 return;
2640
Rusty Russell44623442009-01-01 10:12:23 +10302641 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002642
2643 /* Don't print started cpu buffer for the first entry of the trace */
2644 if (iter->idx > 1)
2645 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2646 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05002647}
2648
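/*
 * Example (illustrative): with the "annotate" trace option set and a
 * buffer that overran, the first entry printed from each subsequent
 * CPU is preceded by a marker line such as:
 *
 *	##### CPU 2 buffer started ####
 */
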
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002649static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002650{
Steven Rostedt214023c2008-05-12 21:20:46 +02002651 struct trace_seq *s = &iter->seq;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002652 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002653 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002654 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002655
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002656 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002657
Steven Rostedta3097202008-11-07 22:36:02 -05002658 test_cpu_buff_start(iter);
2659
Steven Rostedtf633cef2008-12-23 23:24:13 -05002660 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002661
2662 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002663 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2664 trace_print_lat_context(iter);
2665 else
2666 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002667 }
2668
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002669 if (trace_seq_has_overflowed(s))
2670 return TRACE_TYPE_PARTIAL_LINE;
2671
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002672 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002673 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002674
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002675 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04002676
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002677 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002678}
2679
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002680static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002681{
2682 struct trace_seq *s = &iter->seq;
2683 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002684 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002685
2686 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002687
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002688 if (trace_flags & TRACE_ITER_CONTEXT_INFO)
2689 trace_seq_printf(s, "%d %d %llu ",
2690 entry->pid, iter->cpu, iter->ts);
2691
2692 if (trace_seq_has_overflowed(s))
2693 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002694
Steven Rostedtf633cef2008-12-23 23:24:13 -05002695 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002696 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002697 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002698
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002699 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04002700
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002701 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002702}
2703
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002704static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002705{
2706 struct trace_seq *s = &iter->seq;
2707 unsigned char newline = '\n';
2708 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002709 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002710
2711 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002712
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002713 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002714 SEQ_PUT_HEX_FIELD(s, entry->pid);
2715 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2716 SEQ_PUT_HEX_FIELD(s, iter->ts);
2717 if (trace_seq_has_overflowed(s))
2718 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002719 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002720
Steven Rostedtf633cef2008-12-23 23:24:13 -05002721 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002722 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04002723 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002724 if (ret != TRACE_TYPE_HANDLED)
2725 return ret;
2726 }
Steven Rostedt7104f302008-10-01 10:52:51 -04002727
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002728 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002729
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002730 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002731}
2732
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002733static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002734{
2735 struct trace_seq *s = &iter->seq;
2736 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002737 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002738
2739 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002740
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002741 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002742 SEQ_PUT_FIELD(s, entry->pid);
2743 SEQ_PUT_FIELD(s, iter->cpu);
2744 SEQ_PUT_FIELD(s, iter->ts);
2745 if (trace_seq_has_overflowed(s))
2746 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002747 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002748
Steven Rostedtf633cef2008-12-23 23:24:13 -05002749 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04002750 return event ? event->funcs->binary(iter, 0, event) :
2751 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002752}
2753
Jiri Olsa62b915f2010-04-02 19:01:22 +02002754int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002755{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002756 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002757 int cpu;
2758
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002759 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002760 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002761 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002762 buf_iter = trace_buffer_iter(iter, cpu);
2763 if (buf_iter) {
2764 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002765 return 0;
2766 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002767 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002768 return 0;
2769 }
2770 return 1;
2771 }
2772
Steven Rostedtab464282008-05-12 21:21:00 +02002773 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04002774 buf_iter = trace_buffer_iter(iter, cpu);
2775 if (buf_iter) {
2776 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04002777 return 0;
2778 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002779 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04002780 return 0;
2781 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002782 }
Steven Rostedtd7690412008-10-01 00:29:53 -04002783
Frederic Weisbecker797d3712008-09-30 18:13:45 +02002784 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002785}
2786
Lai Jiangshan4f535962009-05-18 19:35:34 +08002787/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05002788enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002789{
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002790 enum print_line_t ret;
2791
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002792 if (iter->lost_events) {
2793 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2794 iter->cpu, iter->lost_events);
2795 if (trace_seq_has_overflowed(&iter->seq))
2796 return TRACE_TYPE_PARTIAL_LINE;
2797 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04002798
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002799 if (iter->trace && iter->trace->print_line) {
2800 ret = iter->trace->print_line(iter);
2801 if (ret != TRACE_TYPE_UNHANDLED)
2802 return ret;
2803 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02002804
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05002805 if (iter->ent->type == TRACE_BPUTS &&
2806 trace_flags & TRACE_ITER_PRINTK &&
2807 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2808 return trace_print_bputs_msg_only(iter);
2809
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002810 if (iter->ent->type == TRACE_BPRINT &&
2811 trace_flags & TRACE_ITER_PRINTK &&
2812 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002813 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002814
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002815 if (iter->ent->type == TRACE_PRINT &&
2816 trace_flags & TRACE_ITER_PRINTK &&
2817 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002818 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002819
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002820 if (trace_flags & TRACE_ITER_BIN)
2821 return print_bin_fmt(iter);
2822
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002823 if (trace_flags & TRACE_ITER_HEX)
2824 return print_hex_fmt(iter);
2825
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002826 if (trace_flags & TRACE_ITER_RAW)
2827 return print_raw_fmt(iter);
2828
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002829 return print_trace_fmt(iter);
2830}
2831
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002832void trace_latency_header(struct seq_file *m)
2833{
2834 struct trace_iterator *iter = m->private;
2835
2836 /* print nothing if the buffers are empty */
2837 if (trace_empty(iter))
2838 return;
2839
2840 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2841 print_trace_header(m, iter);
2842
2843 if (!(trace_flags & TRACE_ITER_VERBOSE))
2844 print_lat_help_header(m);
2845}
2846
Jiri Olsa62b915f2010-04-02 19:01:22 +02002847void trace_default_header(struct seq_file *m)
2848{
2849 struct trace_iterator *iter = m->private;
2850
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002851 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2852 return;
2853
Jiri Olsa62b915f2010-04-02 19:01:22 +02002854 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2855 /* print nothing if the buffers are empty */
2856 if (trace_empty(iter))
2857 return;
2858 print_trace_header(m, iter);
2859 if (!(trace_flags & TRACE_ITER_VERBOSE))
2860 print_lat_help_header(m);
2861 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002862 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2863 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002864 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002865 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002866 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002867 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02002868 }
2869}
2870
Steven Rostedte0a413f2011-09-29 21:26:16 -04002871static void test_ftrace_alive(struct seq_file *m)
2872{
2873 if (!ftrace_is_dead())
2874 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002875 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2876 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002877}
2878
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002879#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002880static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002881{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002882 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2883 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2884 "# Takes a snapshot of the main buffer.\n"
2885 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2886 "# (Doesn't have to be '2' works with any number that\n"
2887 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002888}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002889
2890static void show_snapshot_percpu_help(struct seq_file *m)
2891{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002892 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002893#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002894 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2895 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002896#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002897 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2898 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002899#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002900 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2901 "# (Doesn't have to be '2' works with any number that\n"
2902 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002903}
2904
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002905static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2906{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05002907 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002908 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002909 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002910 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002911
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002912 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002913 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2914 show_snapshot_main_help(m);
2915 else
2916 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002917}
2918#else
2919/* Should never be called */
2920static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2921#endif
2922
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002923static int s_show(struct seq_file *m, void *v)
2924{
2925 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002926 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002927
2928 if (iter->ent == NULL) {
2929 if (iter->tr) {
2930 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2931 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002932 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002933 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002934 if (iter->snapshot && trace_empty(iter))
2935 print_snapshot_help(m, iter);
2936 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002937 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02002938 else
2939 trace_default_header(m);
2940
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002941 } else if (iter->leftover) {
2942 /*
2943 * If we filled the seq_file buffer earlier, we
2944 * want to just show it now.
2945 */
2946 ret = trace_print_seq(m, &iter->seq);
2947
2948 /* ret should this time be zero, but you never know */
2949 iter->leftover = ret;
2950
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002951 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002952 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002953 ret = trace_print_seq(m, &iter->seq);
2954 /*
2955 * If we overflow the seq_file buffer, then it will
2956 * ask us for this data again at start up.
2957 * Use that instead.
2958 * ret is 0 if seq_file write succeeded.
2959 * -1 otherwise.
2960 */
2961 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002962 }
2963
2964 return 0;
2965}
2966
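/*
 * The leftover handling above matters when a single entry expands to
 * more text than the seq_file buffer can hold: trace_print_seq()
 * returns -1, iter->leftover is set, and the next s_show() call
 * flushes the saved trace_seq contents instead of consuming a new
 * entry.
 */
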
Oleg Nesterov649e9c702013-07-23 17:25:54 +02002967/*
2968 * Should be used after trace_array_get(); trace_types_lock
2969 * ensures that i_cdev was already initialized.
2970 */
2971static inline int tracing_get_cpu(struct inode *inode)
2972{
2973 if (inode->i_cdev) /* See trace_create_cpu_file() */
2974 return (long)inode->i_cdev - 1;
2975 return RING_BUFFER_ALL_CPUS;
2976}
2977
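/*
 * Sketch of the encoding (trace_create_cpu_file() itself is outside
 * this excerpt): per-cpu files store "cpu + 1" in i_cdev, so a NULL
 * i_cdev (top-level files) decodes to RING_BUFFER_ALL_CPUS while,
 * e.g., per_cpu/cpu3 stores (void *)4 and decodes back to cpu 3.
 */
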
James Morris88e9d342009-09-22 16:43:43 -07002978static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002979 .start = s_start,
2980 .next = s_next,
2981 .stop = s_stop,
2982 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002983};
2984
Ingo Molnare309b412008-05-12 21:20:51 +02002985static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02002986__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002987{
Oleg Nesterov6484c712013-07-23 17:26:10 +02002988 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002989 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02002990 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002991
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002992 if (tracing_disabled)
2993 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02002994
Jiri Olsa50e18b92012-04-25 10:23:39 +02002995 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002996 if (!iter)
2997 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002998
Steven Rostedt6d158a82012-06-27 20:46:14 -04002999 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
3000 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003001 if (!iter->buffer_iter)
3002 goto release;
3003
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003004 /*
3005 * We make a copy of the current tracer to avoid concurrent
3006 * changes on it while we are reading.
3007 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003008 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003009 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003010 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003011 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003012
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003013 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003014
Li Zefan79f55992009-06-15 14:58:26 +08003015 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003016 goto fail;
3017
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003018 iter->tr = tr;
3019
3020#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003021 /* Currently only the top directory has a snapshot */
3022 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003023 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003024 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003025#endif
3026 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003027 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003028 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02003029 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003030 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003031
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003032 /* Notify the tracer early; before we stop tracing. */
3033 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01003034 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003035
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003036 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003037 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003038 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3039
David Sharp8be07092012-11-13 12:18:22 -08003040 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09003041 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08003042 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3043
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003044 /* stop the trace while dumping if we are not opening "snapshot" */
3045 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003046 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003047
Steven Rostedtae3b5092013-01-23 15:22:59 -05003048 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003049 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003050 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003051 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003052 }
3053 ring_buffer_read_prepare_sync();
3054 for_each_tracing_cpu(cpu) {
3055 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003056 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003057 }
3058 } else {
3059 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003060 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003061 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003062 ring_buffer_read_prepare_sync();
3063 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003064 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003065 }
3066
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003067 mutex_unlock(&trace_types_lock);
3068
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003069 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003070
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003071 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003072 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003073 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003074 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003075release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02003076 seq_release_private(inode, file);
3077 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003078}
3079
3080int tracing_open_generic(struct inode *inode, struct file *filp)
3081{
Steven Rostedt60a11772008-05-12 21:20:44 +02003082 if (tracing_disabled)
3083 return -ENODEV;
3084
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003085 filp->private_data = inode->i_private;
3086 return 0;
3087}
3088
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003089bool tracing_is_disabled(void)
3090{
3091 return tracing_disabled;
3092}
3093
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003094/*
3095 * Open and update trace_array ref count.
3096 * Must have the current trace_array passed to it.
3097 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003098static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003099{
3100 struct trace_array *tr = inode->i_private;
3101
3102 if (tracing_disabled)
3103 return -ENODEV;
3104
3105 if (trace_array_get(tr) < 0)
3106 return -ENODEV;
3107
3108 filp->private_data = inode->i_private;
3109
3110 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003111}
3112
Hannes Eder4fd27352009-02-10 19:44:12 +01003113static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003114{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003115 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003116 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003117 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003118 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003119
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003120 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003121 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003122 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003123 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003124
Oleg Nesterov6484c712013-07-23 17:26:10 +02003125 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003126 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003127 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003128
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003129 for_each_tracing_cpu(cpu) {
3130 if (iter->buffer_iter[cpu])
3131 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3132 }
3133
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003134 if (iter->trace && iter->trace->close)
3135 iter->trace->close(iter);
3136
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003137 if (!iter->snapshot)
3138 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003139 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003140
3141 __trace_array_put(tr);
3142
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003143 mutex_unlock(&trace_types_lock);
3144
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003145 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003146 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003147 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003148 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003149 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003150
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003151 return 0;
3152}
3153
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003154static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3155{
3156 struct trace_array *tr = inode->i_private;
3157
3158 trace_array_put(tr);
3159 return 0;
3160}
3161
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003162static int tracing_single_release_tr(struct inode *inode, struct file *file)
3163{
3164 struct trace_array *tr = inode->i_private;
3165
3166 trace_array_put(tr);
3167
3168 return single_release(inode, file);
3169}
3170
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003171static int tracing_open(struct inode *inode, struct file *file)
3172{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003173 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003174 struct trace_iterator *iter;
3175 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003176
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003177 if (trace_array_get(tr) < 0)
3178 return -ENODEV;
3179
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003180 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003181 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3182 int cpu = tracing_get_cpu(inode);
3183
3184 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003185 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003186 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003187 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003188 }
3189
3190 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003191 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003192 if (IS_ERR(iter))
3193 ret = PTR_ERR(iter);
3194 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3195 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3196 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003197
3198 if (ret < 0)
3199 trace_array_put(tr);
3200
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003201 return ret;
3202}
3203
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003204/*
3205 * Some tracers are not suitable for instance buffers.
3206 * A tracer is always available for the global array (toplevel)
3207 * or if it explicitly states that it is.
3208 */
3209static bool
3210trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3211{
3212 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3213}
3214
3215/* Find the next tracer that this trace array may use */
3216static struct tracer *
3217get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3218{
3219 while (t && !trace_ok_for_array(t, tr))
3220 t = t->next;
3221
3222 return t;
3223}
3224
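/*
 * For example, when listing tracers for an instance created under
 * instances/foo, get_tracer_for_array() skips every tracer that did
 * not set ->allow_instances, while the top-level (global) array sees
 * them all.
 */
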
Ingo Molnare309b412008-05-12 21:20:51 +02003225static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003226t_next(struct seq_file *m, void *v, loff_t *pos)
3227{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003228 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003229 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003230
3231 (*pos)++;
3232
3233 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003234 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003235
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003236 return t;
3237}
3238
3239static void *t_start(struct seq_file *m, loff_t *pos)
3240{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003241 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003242 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003243 loff_t l = 0;
3244
3245 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003246
3247 t = get_tracer_for_array(tr, trace_types);
3248 for (; t && l < *pos; t = t_next(m, t, &l))
3249 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003250
3251 return t;
3252}
3253
3254static void t_stop(struct seq_file *m, void *p)
3255{
3256 mutex_unlock(&trace_types_lock);
3257}
3258
3259static int t_show(struct seq_file *m, void *v)
3260{
3261 struct tracer *t = v;
3262
3263 if (!t)
3264 return 0;
3265
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003266 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003267 if (t->next)
3268 seq_putc(m, ' ');
3269 else
3270 seq_putc(m, '\n');
3271
3272 return 0;
3273}
3274
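/*
 * These t_* callbacks back the "available_tracers" file: one pass
 * over the registered tracers, names separated by spaces, newline
 * after the last. Illustrative output (depends on kernel config):
 *
 *	function_graph function nop
 */
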
James Morris88e9d342009-09-22 16:43:43 -07003275static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003276 .start = t_start,
3277 .next = t_next,
3278 .stop = t_stop,
3279 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003280};
3281
3282static int show_traces_open(struct inode *inode, struct file *file)
3283{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003284 struct trace_array *tr = inode->i_private;
3285 struct seq_file *m;
3286 int ret;
3287
Steven Rostedt60a11772008-05-12 21:20:44 +02003288 if (tracing_disabled)
3289 return -ENODEV;
3290
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003291 ret = seq_open(file, &show_traces_seq_ops);
3292 if (ret)
3293 return ret;
3294
3295 m = file->private_data;
3296 m->private = tr;
3297
3298 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003299}
3300
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003301static ssize_t
3302tracing_write_stub(struct file *filp, const char __user *ubuf,
3303 size_t count, loff_t *ppos)
3304{
3305 return count;
3306}
3307
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003308loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003309{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003310 int ret;
3311
Slava Pestov364829b2010-11-24 15:13:16 -08003312 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003313 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003314 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003315 file->f_pos = ret = 0;
3316
3317 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003318}
3319
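/*
 * Note the asymmetry in tracing_lseek(): readable opens seek through
 * the seq_file as usual, while write-only opens (used to truncate
 * the trace, see tracing_open() above) keep f_pos pinned at 0.
 */
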
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003320static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003321 .open = tracing_open,
3322 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003323 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003324 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003325 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003326};
3327
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003328static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003329 .open = show_traces_open,
3330 .read = seq_read,
3331 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003332 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003333};
3334
Ingo Molnar36dfe922008-05-12 21:20:52 +02003335/*
Ingo Molnar36dfe922008-05-12 21:20:52 +02003336 * The tracer itself will not take this lock, but still we want
3337 * to provide a consistent cpumask to user-space:
3338 */
3339static DEFINE_MUTEX(tracing_cpumask_update_lock);
3340
3341/*
3342 * Temporary storage for the character representation of the
3343 * CPU bitmask (and one more byte for the newline):
3344 */
3345static char mask_str[NR_CPUS + 1];
3346
Ingo Molnarc7078de2008-05-12 21:20:52 +02003347static ssize_t
3348tracing_cpumask_read(struct file *filp, char __user *ubuf,
3349 size_t count, loff_t *ppos)
3350{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003351 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003352 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003353
3354 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003355
Tejun Heo1a402432015-02-13 14:37:39 -08003356 len = snprintf(mask_str, count, "%*pb\n",
3357 cpumask_pr_args(tr->tracing_cpumask));
3358 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003359 count = -EINVAL;
3360 goto out_err;
3361 }
Ingo Molnar36dfe922008-05-12 21:20:52 +02003362 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3363
3364out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003365 mutex_unlock(&tracing_cpumask_update_lock);
3366
3367 return count;
3368}
3369
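/*
 * Example (illustrative): on a 4-CPU machine with every CPU traced,
 * reading tracing_cpumask returns "f\n"; after limiting tracing to
 * CPUs 0-1 it returns "3\n", since "%*pb" prints the mask in hex.
 */
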
3370static ssize_t
3371tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3372 size_t count, loff_t *ppos)
3373{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003374 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303375 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003376 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303377
3378 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3379 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003380
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303381 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003382 if (err)
3383 goto err_unlock;
3384
Li Zefan215368e2009-06-15 10:56:42 +08003385 mutex_lock(&tracing_cpumask_update_lock);
3386
Steven Rostedta5e25882008-12-02 15:34:05 -05003387 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003388 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02003389 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003390 /*
3391 * Increase/decrease the disabled counter if we are
3392 * about to flip a bit in the cpumask:
3393 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003394 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303395 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003396 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3397 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003398 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003399 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303400 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003401 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3402 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003403 }
3404 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003405 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05003406 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02003407
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003408 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003409
Ingo Molnarc7078de2008-05-12 21:20:52 +02003410 mutex_unlock(&tracing_cpumask_update_lock);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303411 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003412
Ingo Molnarc7078de2008-05-12 21:20:52 +02003413 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003414
3415err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08003416 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003417
3418 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003419}
3420
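/*
 * Usage sketch (illustrative):
 *
 *	# echo 3 > tracing_cpumask		(trace only CPUs 0 and 1)
 *
 * For every bit that flips, the loop above raises or drops the
 * per-cpu "disabled" counter and toggles ring buffer recording for
 * that CPU, so tracers observe a consistent on/off transition.
 */
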
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003421static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003422 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003423 .read = tracing_cpumask_read,
3424 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003425 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003426 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003427};
3428
Li Zefanfdb372e2009-12-08 11:15:59 +08003429static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003430{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003431 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003432 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003433 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003434 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003435
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003436 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003437 tracer_flags = tr->current_trace->flags->val;
3438 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003439
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003440 for (i = 0; trace_options[i]; i++) {
3441 if (trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003442 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003443 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003444 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003445 }
3446
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003447 for (i = 0; trace_opts[i].name; i++) {
3448 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003449 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003450 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003451 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003452 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003453 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003454
Li Zefanfdb372e2009-12-08 11:15:59 +08003455 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003456}
3457
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003458static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003459 struct tracer_flags *tracer_flags,
3460 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003461{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003462 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003463 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003464
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003465 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003466 if (ret)
3467 return ret;
3468
3469 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003470 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003471 else
Zhaolei77708412009-08-07 18:53:21 +08003472 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003473 return 0;
3474}
3475
Li Zefan8d18eaa2009-12-08 11:17:06 +08003476/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003477static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003478{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003479 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003480 struct tracer_flags *tracer_flags = trace->flags;
3481 struct tracer_opt *opts = NULL;
3482 int i;
3483
3484 for (i = 0; tracer_flags->opts[i].name; i++) {
3485 opts = &tracer_flags->opts[i];
3486
3487 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003488 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003489 }
3490
3491 return -EINVAL;
3492}
3493
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003494/* Some tracers require overwrite to stay enabled */
3495int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3496{
3497 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3498 return -1;
3499
3500 return 0;
3501}
3502
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003503int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003504{
3505 /* do nothing if flag is already set */
3506 if (!!(trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003507 return 0;
3508
3509 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003510 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05003511 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003512 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003513
3514 if (enabled)
3515 trace_flags |= mask;
3516 else
3517 trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003518
3519 if (mask == TRACE_ITER_RECORD_CMD)
3520 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003521
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003522 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003523 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003524#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003525 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003526#endif
3527 }
Steven Rostedt81698832012-10-11 10:15:05 -04003528
3529 if (mask == TRACE_ITER_PRINTK)
3530 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003531
3532 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003533}
3534
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003535static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003536{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003537 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003538 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003539 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003540 int i;
3541
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003542 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003543
Li Zefan8d18eaa2009-12-08 11:17:06 +08003544 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003545 neg = 1;
3546 cmp += 2;
3547 }
3548
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003549 mutex_lock(&trace_types_lock);
3550
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003551 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003552 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003553 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003554 break;
3555 }
3556 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003557
3558 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003559 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003560 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003561
3562 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003563
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003564 return ret;
3565}
3566
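/*
 * Parsing example (illustrative): trace_set_options(tr, "nooverwrite")
 * strips the "no" prefix, leaving cmp = "overwrite" and neg = 1, and
 * ends up calling set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 0). An
 * unrecognized name falls through to set_tracer_option() to try the
 * tracer-specific flags.
 */
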
3567static ssize_t
3568tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3569 size_t cnt, loff_t *ppos)
3570{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003571 struct seq_file *m = filp->private_data;
3572 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003573 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003574 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003575
3576 if (cnt >= sizeof(buf))
3577 return -EINVAL;
3578
3579 if (copy_from_user(&buf, ubuf, cnt))
3580 return -EFAULT;
3581
Steven Rostedta8dd2172013-01-09 20:54:17 -05003582 buf[cnt] = 0;
3583
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003584 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003585 if (ret < 0)
3586 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003587
Jiri Olsacf8517c2009-10-23 19:36:16 -04003588 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003589
3590 return cnt;
3591}
3592
Li Zefanfdb372e2009-12-08 11:15:59 +08003593static int tracing_trace_options_open(struct inode *inode, struct file *file)
3594{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003595 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003596 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003597
Li Zefanfdb372e2009-12-08 11:15:59 +08003598 if (tracing_disabled)
3599 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003600
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003601 if (trace_array_get(tr) < 0)
3602 return -ENODEV;
3603
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003604 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3605 if (ret < 0)
3606 trace_array_put(tr);
3607
3608 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003609}
3610
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003611static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003612 .open = tracing_trace_options_open,
3613 .read = seq_read,
3614 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003615 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003616 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003617};
3618
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003619static const char readme_msg[] =
3620 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003621 "# echo 0 > tracing_on : quick way to disable tracing\n"
3622 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3623 " Important files:\n"
3624 " trace\t\t\t- The static contents of the buffer\n"
3625 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3626 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3627 " current_tracer\t- function and latency tracers\n"
3628 " available_tracers\t- list of configured tracers for current_tracer\n"
3629 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3630 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3631 " trace_clock\t\t-change the clock used to order events\n"
3632 " local: Per cpu clock but may not be synced across CPUs\n"
3633 " global: Synced across CPUs but slows tracing down.\n"
3634 " counter: Not a clock, but just an increment\n"
3635 " uptime: Jiffy counter from time of boot\n"
3636 " perf: Same clock that perf events use\n"
3637#ifdef CONFIG_X86_64
3638 " x86-tsc: TSC cycle counter\n"
3639#endif
3640 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3641 " tracing_cpumask\t- Limit which CPUs to trace\n"
3642 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3643 "\t\t\t Remove sub-buffer with rmdir\n"
3644 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003645 "\t\t\t Disable an option by prefixing 'no' to the\n"
3646 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003647 " saved_cmdlines_size\t- echo the number of command lines to save in the comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003648#ifdef CONFIG_DYNAMIC_FTRACE
3649 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003650 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3651 "\t\t\t functions\n"
3652 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3653 "\t modules: Can select a group via module\n"
3654 "\t Format: :mod:<module-name>\n"
3655 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3656 "\t triggers: a command to perform when function is hit\n"
3657 "\t Format: <function>:<trigger>[:count]\n"
3658 "\t trigger: traceon, traceoff\n"
3659 "\t\t enable_event:<system>:<event>\n"
3660 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003661#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003662 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003663#endif
3664#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003665 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003666#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04003667 "\t\t dump\n"
3668 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003669 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3670 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3671 "\t The first one will disable tracing every time do_fault is hit\n"
3672 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3673 "\t The first time do trap is hit and it disables tracing, the\n"
3674 "\t counter will decrement to 2. If tracing is already disabled,\n"
3675 "\t the counter will not decrement. It only decrements when the\n"
3676 "\t trigger did work\n"
3677 "\t To remove trigger without count:\n"
3678 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3679 "\t To remove trigger with a count:\n"
3680 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003681 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003682 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3683 "\t modules: Can select a group via module command :mod:\n"
3684 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003685#endif /* CONFIG_DYNAMIC_FTRACE */
3686#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003687 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3688 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003689#endif
3690#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3691 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09003692 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003693 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3694#endif
3695#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003696 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3697 "\t\t\t snapshot buffer. Read the contents for more\n"
3698 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003699#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003700#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003701 " stack_trace\t\t- Shows the max stack trace when active\n"
3702 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003703 "\t\t\t Write into this file to reset the max size (trigger a\n"
3704 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003705#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003706 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3707 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003708#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003709#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003710 " events/\t\t- Directory containing all trace event subsystems:\n"
3711 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3712 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003713 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3714 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003715 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003716 " events/<system>/<event>/\t- Directory containing control files for\n"
3717 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003718 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3719 " filter\t\t- If set, only events passing filter are traced\n"
3720 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003721 "\t Format: <trigger>[:count][if <filter>]\n"
3722 "\t trigger: traceon, traceoff\n"
3723 "\t enable_event:<system>:<event>\n"
3724 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003725#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003726 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003727#endif
3728#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003729 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003730#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003731 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3732 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3733 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3734 "\t events/block/block_unplug/trigger\n"
3735 "\t The first disables tracing every time block_unplug is hit.\n"
3736 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3737 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3738 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3739 "\t Like function triggers, the counter is only decremented if it\n"
3740 "\t enabled or disabled tracing.\n"
3741 "\t To remove a trigger without a count:\n"
3742 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3743 "\t To remove a trigger with a count:\n"
3744 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3745 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003746;
3747
3748static ssize_t
3749tracing_readme_read(struct file *filp, char __user *ubuf,
3750 size_t cnt, loff_t *ppos)
3751{
3752 return simple_read_from_buffer(ubuf, cnt, ppos,
3753 readme_msg, strlen(readme_msg));
3754}
3755
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003756static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003757 .open = tracing_open_generic,
3758 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003759 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003760};
3761
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003762static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003763{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003764 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003765
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003766 if (*pos || m->count)
3767 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003768
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003769 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003770
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003771 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3772 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003773 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003774 continue;
3775
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003776 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003777 }
3778
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003779 return NULL;
3780}
Avadh Patel69abe6a2009-04-10 16:04:48 -04003781
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003782static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3783{
3784 void *v;
3785 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003786
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003787 preempt_disable();
3788 arch_spin_lock(&trace_cmdline_lock);
3789
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003790 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003791 while (l <= *pos) {
3792 v = saved_cmdlines_next(m, v, &l);
3793 if (!v)
3794 return NULL;
3795 }
3796
3797 return v;
3798}
3799
3800static void saved_cmdlines_stop(struct seq_file *m, void *v)
3801{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003802 arch_spin_unlock(&trace_cmdline_lock);
3803 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003804}
3805
3806static int saved_cmdlines_show(struct seq_file *m, void *v)
3807{
3808 char buf[TASK_COMM_LEN];
3809 unsigned int *pid = v;
3810
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003811 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003812 seq_printf(m, "%d %s\n", *pid, buf);
3813 return 0;
3814}
3815
3816static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3817 .start = saved_cmdlines_start,
3818 .next = saved_cmdlines_next,
3819 .stop = saved_cmdlines_stop,
3820 .show = saved_cmdlines_show,
3821};
3822
3823static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3824{
3825 if (tracing_disabled)
3826 return -ENODEV;
3827
3828 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04003829}
3830
3831static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003832 .open = tracing_saved_cmdlines_open,
3833 .read = seq_read,
3834 .llseek = seq_lseek,
3835 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04003836};
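/*
 * The four callbacks above follow the standard seq_file iterator
 * contract: start() positions a cursor for *pos (taking whatever lock
 * the data needs), next() advances it, stop() undoes what start() did,
 * and show() renders exactly one record.  Below is a minimal sketch of
 * the same pattern over a plain array; the demo_* names and data are
 * illustrative assumptions, and registration of demo_fops in debugfs
 * or procfs is omitted.
 */
#if 0	/* illustrative sketch, not part of this file */
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/fs.h>

static int demo_vals[] = { 3, 1, 4, 1, 5 };

static void *demo_start(struct seq_file *m, loff_t *pos)
{
        /* Return a cursor for *pos, or NULL when past the end. */
        return *pos < ARRAY_SIZE(demo_vals) ? demo_vals + *pos : NULL;
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;
        return *pos < ARRAY_SIZE(demo_vals) ? demo_vals + *pos : NULL;
}

static void demo_stop(struct seq_file *m, void *v)
{
        /* Nothing to unlock for a static array. */
}

static int demo_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%d\n", *(int *)v);
        return 0;
}

static const struct seq_operations demo_seq_ops = {
        .start = demo_start,
        .next  = demo_next,
        .stop  = demo_stop,
        .show  = demo_show,
};

static int demo_open(struct inode *inode, struct file *filp)
{
        return seq_open(filp, &demo_seq_ops);
}

static const struct file_operations demo_fops = {
        .open    = demo_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};
#endif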
3837
3838static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003839tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3840 size_t cnt, loff_t *ppos)
3841{
3842 char buf[64];
3843 int r;
3844
3845 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003846 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003847 arch_spin_unlock(&trace_cmdline_lock);
3848
3849 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3850}
3851
3852static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3853{
3854 kfree(s->saved_cmdlines);
3855 kfree(s->map_cmdline_to_pid);
3856 kfree(s);
3857}
3858
3859static int tracing_resize_saved_cmdlines(unsigned int val)
3860{
3861 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3862
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003863 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003864 if (!s)
3865 return -ENOMEM;
3866
3867 if (allocate_cmdlines_buffer(val, s) < 0) {
3868 kfree(s);
3869 return -ENOMEM;
3870 }
3871
3872 arch_spin_lock(&trace_cmdline_lock);
3873 savedcmd_temp = savedcmd;
3874 savedcmd = s;
3875 arch_spin_unlock(&trace_cmdline_lock);
3876 free_saved_cmdlines_buffer(savedcmd_temp);
3877
3878 return 0;
3879}
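/*
 * Note the shape of the resize above: the replacement buffer is built
 * completely before it is published, the pointer swap is the only work
 * done under the lock, and the old buffer is freed only after the lock
 * is dropped (trace_cmdline_lock is a raw spinlock, so no sleeping
 * allocation or free may happen inside it).  A userspace sketch of the
 * same publish-then-free pattern, with a pthread mutex standing in for
 * the spinlock and all names being illustrative assumptions:
 */
#if 0	/* illustrative sketch, not part of this file */
#include <stdlib.h>
#include <pthread.h>

struct table {
        size_t len;
        int *slots;
};

static struct table *cur_table;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static int resize_table(size_t len)
{
        struct table *s, *old;

        /* Build the replacement fully before publishing it. */
        s = malloc(sizeof(*s));
        if (!s)
                return -1;
        s->slots = calloc(len, sizeof(*s->slots));
        if (!s->slots) {
                free(s);
                return -1;
        }
        s->len = len;

        /* Publish under the lock; keep the critical section tiny. */
        pthread_mutex_lock(&table_lock);
        old = cur_table;
        cur_table = s;
        pthread_mutex_unlock(&table_lock);

        /* Tear the old table down outside the lock. */
        if (old) {
                free(old->slots);
                free(old);
        }
        return 0;
}
#endif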
3880
3881static ssize_t
3882tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3883 size_t cnt, loff_t *ppos)
3884{
3885 unsigned long val;
3886 int ret;
3887
3888 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3889 if (ret)
3890 return ret;
3891
3892 /* must have at least 1 entry and at most PID_MAX_DEFAULT */
3893 if (!val || val > PID_MAX_DEFAULT)
3894 return -EINVAL;
3895
3896 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3897 if (ret < 0)
3898 return ret;
3899
3900 *ppos += cnt;
3901
3902 return cnt;
3903}
3904
3905static const struct file_operations tracing_saved_cmdlines_size_fops = {
3906 .open = tracing_open_generic,
3907 .read = tracing_saved_cmdlines_size_read,
3908 .write = tracing_saved_cmdlines_size_write,
3909};
3910
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04003911static void
3912trace_insert_enum_map(struct trace_enum_map **start, struct trace_enum_map **stop)
3913{
3914 struct trace_enum_map **map;
3915 int len = stop - start;
3916
3917 if (len <= 0)
3918 return;
3919
3920 map = start;
3921
3922 trace_event_enum_update(map, len);
3923}
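/*
 * The start/stop pair above is how the kernel usually walks a table
 * the linker assembled from scattered definitions: two addresses
 * bracket the array and pointer subtraction gives the element count.
 * A self-contained sketch of that arithmetic (the table here is an
 * ordinary array rather than a linker section):
 */
#if 0	/* illustrative sketch, not part of this file */
#include <stdio.h>

struct pair { const char *name; int val; };

static struct pair table[] = {
        { "alpha", 1 },
        { "beta",  2 },
};

int main(void)
{
        struct pair *start = table;
        struct pair *stop = table + sizeof(table) / sizeof(table[0]);
        struct pair *p;
        long len = stop - start;	/* element count, not bytes */

        if (len <= 0)
                return 0;
        for (p = start; p < stop; p++)
                printf("%s = %d\n", p->name, p->val);
        printf("%ld entries\n", len);
        return 0;
}
#endif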
3924
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003925static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003926tracing_set_trace_read(struct file *filp, char __user *ubuf,
3927 size_t cnt, loff_t *ppos)
3928{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003929 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08003930 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003931 int r;
3932
3933 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003934 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003935 mutex_unlock(&trace_types_lock);
3936
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003937 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003938}
3939
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003940int tracer_init(struct tracer *t, struct trace_array *tr)
3941{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003942 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003943 return t->init(tr);
3944}
3945
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003946static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003947{
3948 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05003949
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003950 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003951 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003952}
3953
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003954#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09003955/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003956static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3957 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09003958{
3959 int cpu, ret = 0;
3960
3961 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3962 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003963 ret = ring_buffer_resize(trace_buf->buffer,
3964 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003965 if (ret < 0)
3966 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003967 per_cpu_ptr(trace_buf->data, cpu)->entries =
3968 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003969 }
3970 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003971 ret = ring_buffer_resize(trace_buf->buffer,
3972 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003973 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003974 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3975 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003976 }
3977
3978 return ret;
3979}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003980#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09003981
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003982static int __tracing_resize_ring_buffer(struct trace_array *tr,
3983 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04003984{
3985 int ret;
3986
3987 /*
3988 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04003989 * we use the size that was given, and we can forget about
3990 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04003991 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05003992 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04003993
Steven Rostedtb382ede62012-10-10 21:44:34 -04003994 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003995 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04003996 return 0;
3997
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003998 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003999 if (ret < 0)
4000 return ret;
4001
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004002#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004003 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4004 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004005 goto out;
4006
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004007 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004008 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004009 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4010 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004011 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04004012 /*
4013 * AARGH! We are left with different
4014 * size max buffer!!!!
4015 * The max buffer is our "snapshot" buffer.
4016 * When a tracer needs a snapshot (one of the
4017 * latency tracers), it swaps the max buffer
4018 * with the saved snapshot. We succeeded in
4019 * updating the size of the main buffer, but failed to
4020 * update the size of the max buffer. But when we tried
4021 * to reset the main buffer to the original size, we
4022 * failed there too. This is very unlikely to
4023 * happen, but if it does, warn and kill all
4024 * tracing.
4025 */
Steven Rostedt73c51622009-03-11 13:42:01 -04004026 WARN_ON(1);
4027 tracing_disabled = 1;
4028 }
4029 return ret;
4030 }
4031
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004032 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004033 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004034 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004035 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004036
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004037 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004038#endif /* CONFIG_TRACER_MAX_TRACE */
4039
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004040 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004041 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004042 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004043 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04004044
4045 return ret;
4046}
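/*
 * The CONFIG_TRACER_MAX_TRACE branch above keeps the main and max
 * buffers in lock-step: if resizing the max buffer fails, the main
 * buffer is resized back so the pair stays swappable, and only if that
 * rollback also fails is tracing killed.  A userspace sketch of the
 * grow-both-or-roll-back idea, with realloc() standing in for
 * ring_buffer_resize() and all names being illustrative:
 */
#if 0	/* illustrative sketch, not part of this file */
#include <stdlib.h>

static char *main_buf, *max_buf;
static size_t buf_size;

static int resize_both(size_t new_size)
{
        char *p, *q;

        p = realloc(main_buf, new_size);
        if (!p)
                return -1;
        main_buf = p;

        q = realloc(max_buf, new_size);
        if (!q) {
                /* Roll the main buffer back so the pair stays swappable. */
                p = realloc(main_buf, buf_size);
                if (!p)
                        return -2;	/* rollback failed too: fatal */
                main_buf = p;
                return -1;
        }
        max_buf = q;
        buf_size = new_size;
        return 0;
}
#endif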
4047
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004048static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4049 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004050{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07004051 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004052
4053 mutex_lock(&trace_types_lock);
4054
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004055 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4056 /* make sure this cpu is enabled in the mask */
4057 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4058 ret = -EINVAL;
4059 goto out;
4060 }
4061 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004062
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004063 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004064 if (ret < 0)
4065 ret = -ENOMEM;
4066
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004067out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004068 mutex_unlock(&trace_types_lock);
4069
4070 return ret;
4071}
4072
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004073
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004074/**
4075 * tracing_update_buffers - used by tracing facility to expand ring buffers
4076 *
4077 * To save memory when tracing is never used on a system that has it
4078 * configured in, the ring buffers are set to a minimum size. Once
4079 * a user starts to use the tracing facility, they need to grow
4080 * to their default size.
4081 *
4082 * This function is to be called when a tracer is about to be used.
4083 */
4084int tracing_update_buffers(void)
4085{
4086 int ret = 0;
4087
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004088 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004089 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004090 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004091 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004092 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004093
4094 return ret;
4095}
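/*
 * tracing_update_buffers() is the lazy-allocation gate: the buffers
 * stay at their boot-time minimum until the first real user shows up,
 * then grow exactly once, with the "expanded" flag checked under the
 * same mutex that serializes every resize.  The shape of that idiom in
 * plain C (the names and the realloc() stand-in are illustrative
 * assumptions):
 */
#if 0	/* illustrative sketch, not part of this file */
#include <stdbool.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t resize_lock = PTHREAD_MUTEX_INITIALIZER;
static bool expanded;
static char *buf;

static int ensure_expanded(size_t full_size)
{
        int ret = 0;

        pthread_mutex_lock(&resize_lock);
        if (!expanded) {
                char *p = realloc(buf, full_size);

                if (p) {
                        buf = p;
                        expanded = true;	/* grown once, never shrunk back */
                } else {
                        ret = -1;
                }
        }
        pthread_mutex_unlock(&resize_lock);
        return ret;
}
#endif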
4096
Steven Rostedt577b7852009-02-26 23:43:05 -05004097struct trace_option_dentry;
4098
4099static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004100create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004101
4102static void
4103destroy_trace_option_files(struct trace_option_dentry *topts);
4104
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004105/*
4106 * Used to clear out the tracer before deletion of an instance.
4107 * Must have trace_types_lock held.
4108 */
4109static void tracing_set_nop(struct trace_array *tr)
4110{
4111 if (tr->current_trace == &nop_trace)
4112 return;
4113
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004114 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004115
4116 if (tr->current_trace->reset)
4117 tr->current_trace->reset(tr);
4118
4119 tr->current_trace = &nop_trace;
4120}
4121
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004122static int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004123{
Steven Rostedt577b7852009-02-26 23:43:05 -05004124 static struct trace_option_dentry *topts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004125 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004126#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004127 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004128#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004129 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004130
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004131 mutex_lock(&trace_types_lock);
4132
Steven Rostedt73c51622009-03-11 13:42:01 -04004133 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004134 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004135 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04004136 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01004137 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04004138 ret = 0;
4139 }
4140
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004141 for (t = trace_types; t; t = t->next) {
4142 if (strcmp(t->name, buf) == 0)
4143 break;
4144 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004145 if (!t) {
4146 ret = -EINVAL;
4147 goto out;
4148 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004149 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004150 goto out;
4151
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004152 /* Some tracers are only allowed for the top level buffer */
4153 if (!trace_ok_for_array(t, tr)) {
4154 ret = -EINVAL;
4155 goto out;
4156 }
4157
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004158 /* If trace pipe files are being read, we can't change the tracer */
4159 if (tr->current_trace->ref) {
4160 ret = -EBUSY;
4161 goto out;
4162 }
4163
Steven Rostedt9f029e82008-11-12 15:24:24 -05004164 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004165
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004166 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004167
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004168 if (tr->current_trace->reset)
4169 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05004170
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004171 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004172 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05004173
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004174#ifdef CONFIG_TRACER_MAX_TRACE
4175 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05004176
4177 if (had_max_tr && !t->use_max_tr) {
4178 /*
4179 * We need to make sure that the update_max_tr sees that
4180 * current_trace changed to nop_trace to keep it from
4181 * swapping the buffers after we resize it.
4182 * update_max_tr() is called with interrupts disabled,
4183 * so a synchronize_sched() is sufficient.
4184 */
4185 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004186 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004187 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004188#endif
Steven Rostedt (Red Hat)f1b21c92014-01-14 12:33:33 -05004189 /* Currently, only the top instance has options */
4190 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4191 destroy_trace_option_files(topts);
4192 topts = create_trace_option_files(tr, t);
4193 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004194
4195#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004196 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004197 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004198 if (ret < 0)
4199 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004200 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004201#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05004202
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004203 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004204 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004205 if (ret)
4206 goto out;
4207 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004208
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004209 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004210 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05004211 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004212 out:
4213 mutex_unlock(&trace_types_lock);
4214
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004215 return ret;
4216}
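/*
 * The ordering in tracing_set_tracer() is the interesting part: the
 * old tracer is disabled and reset, current_trace is pointed at
 * nop_trace, and only after synchronize_sched() has guaranteed that no
 * CPU is still inside the old callbacks is its state torn down.  A
 * userspace analogue with a handler pointer; the grace-period wait is
 * stubbed out and every name is an illustrative assumption (real code
 * would need RCU or equivalent barriers):
 */
#if 0	/* illustrative sketch, not part of this file */
#include <stdlib.h>

struct handler {
        void (*handle)(void *data);
        void *data;
};

static void nop_handle(void *data) { }
static struct handler nop_handler = { .handle = nop_handle };

static struct handler *current_handler = &nop_handler;

/* Placeholder for "wait until no thread still runs the old handler". */
static void wait_for_readers(void) { }

static void set_handler(struct handler *next)
{
        struct handler *old = current_handler;

        /* Point at the no-op first so new callers stop using @old... */
        current_handler = &nop_handler;
        wait_for_readers();

        /* ...then it is safe to tear down the old handler's state. */
        if (old != &nop_handler)
                free(old->data);

        current_handler = next;
}
#endif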
4217
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004218static ssize_t
4219tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4220 size_t cnt, loff_t *ppos)
4221{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004222 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004223 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004224 int i;
4225 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004226 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004227
Steven Rostedt60063a62008-10-28 10:44:24 -04004228 ret = cnt;
4229
Li Zefanee6c2c12009-09-18 14:06:47 +08004230 if (cnt > MAX_TRACER_SIZE)
4231 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004232
4233 if (copy_from_user(&buf, ubuf, cnt))
4234 return -EFAULT;
4235
4236 buf[cnt] = 0;
4237
4238 /* strip trailing whitespace. */
4239 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4240 buf[i] = 0;
4241
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004242 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004243 if (err)
4244 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004245
Jiri Olsacf8517c2009-10-23 19:36:16 -04004246 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004247
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004248 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004249}
4250
4251static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004252tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4253 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004254{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004255 char buf[64];
4256 int r;
4257
Steven Rostedtcffae432008-05-12 21:21:00 +02004258 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004259 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004260 if (r > sizeof(buf))
4261 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004262 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004263}
4264
4265static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004266tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4267 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004268{
Hannes Eder5e398412009-02-10 19:44:34 +01004269 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004270 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004271
Peter Huewe22fe9b52011-06-07 21:58:27 +02004272 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4273 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004274 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004275
4276 *ptr = val * 1000;
4277
4278 return cnt;
4279}
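/*
 * The tracing_nsecs_read/write pair above exposes a nanosecond
 * variable in microseconds: reads report nsecs_to_usecs(*ptr) with
 * (unsigned long)-1 passed through as the "unset" sentinel, and writes
 * multiply the user's value by 1000.  The round trip in plain C (all
 * names are illustrative assumptions):
 */
#if 0	/* illustrative sketch, not part of this file */
#include <stdio.h>

static unsigned long thresh_ns;	/* stored in nanoseconds */

static void write_usecs(unsigned long usecs)
{
        thresh_ns = usecs * 1000;	/* the file takes microseconds */
}

static long read_usecs(void)
{
        /* -1 means "unset" and is reported untranslated */
        return thresh_ns == (unsigned long)-1 ? -1 : (long)(thresh_ns / 1000);
}

int main(void)
{
        write_usecs(250);
        printf("%ld\n", read_usecs());	/* prints 250 */
        return 0;
}
#endif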
4280
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004281static ssize_t
4282tracing_thresh_read(struct file *filp, char __user *ubuf,
4283 size_t cnt, loff_t *ppos)
4284{
4285 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4286}
4287
4288static ssize_t
4289tracing_thresh_write(struct file *filp, const char __user *ubuf,
4290 size_t cnt, loff_t *ppos)
4291{
4292 struct trace_array *tr = filp->private_data;
4293 int ret;
4294
4295 mutex_lock(&trace_types_lock);
4296 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4297 if (ret < 0)
4298 goto out;
4299
4300 if (tr->current_trace->update_thresh) {
4301 ret = tr->current_trace->update_thresh(tr);
4302 if (ret < 0)
4303 goto out;
4304 }
4305
4306 ret = cnt;
4307out:
4308 mutex_unlock(&trace_types_lock);
4309
4310 return ret;
4311}
4312
4313static ssize_t
4314tracing_max_lat_read(struct file *filp, char __user *ubuf,
4315 size_t cnt, loff_t *ppos)
4316{
4317 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4318}
4319
4320static ssize_t
4321tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4322 size_t cnt, loff_t *ppos)
4323{
4324 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4325}
4326
Steven Rostedtb3806b42008-05-12 21:20:46 +02004327static int tracing_open_pipe(struct inode *inode, struct file *filp)
4328{
Oleg Nesterov15544202013-07-23 17:25:57 +02004329 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004330 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004331 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004332
4333 if (tracing_disabled)
4334 return -ENODEV;
4335
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004336 if (trace_array_get(tr) < 0)
4337 return -ENODEV;
4338
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004339 mutex_lock(&trace_types_lock);
4340
Steven Rostedtb3806b42008-05-12 21:20:46 +02004341 /* create a buffer to store the information to pass to userspace */
4342 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004343 if (!iter) {
4344 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004345 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004346 goto out;
4347 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004348
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04004349 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05004350 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004351
4352 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4353 ret = -ENOMEM;
4354 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304355 }
4356
Steven Rostedta3097202008-11-07 22:36:02 -05004357 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304358 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004359
Steven Rostedt112f38a72009-06-01 15:16:05 -04004360 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4361 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4362
David Sharp8be07092012-11-13 12:18:22 -08004363 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004364 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004365 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4366
Oleg Nesterov15544202013-07-23 17:25:57 +02004367 iter->tr = tr;
4368 iter->trace_buffer = &tr->trace_buffer;
4369 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004370 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004371 filp->private_data = iter;
4372
Steven Rostedt107bad82008-05-12 21:21:01 +02004373 if (iter->trace->pipe_open)
4374 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004375
Arnd Bergmannb4447862010-07-07 23:40:11 +02004376 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004377
4378 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004379out:
4380 mutex_unlock(&trace_types_lock);
4381 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004382
4383fail:
4384 kfree(iter->trace);
4385 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004386 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004387 mutex_unlock(&trace_types_lock);
4388 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004389}
4390
4391static int tracing_release_pipe(struct inode *inode, struct file *file)
4392{
4393 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02004394 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004395
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004396 mutex_lock(&trace_types_lock);
4397
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004398 tr->current_trace->ref--;
4399
Steven Rostedt29bf4a52009-12-09 12:37:43 -05004400 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05004401 iter->trace->pipe_close(iter);
4402
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004403 mutex_unlock(&trace_types_lock);
4404
Rusty Russell44623442009-01-01 10:12:23 +10304405 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004406 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004407 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004408
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004409 trace_array_put(tr);
4410
Steven Rostedtb3806b42008-05-12 21:20:46 +02004411 return 0;
4412}
4413
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004414static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004415trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004416{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004417 /* Iterators are static, they should be filled or empty */
4418 if (trace_buffer_iter(iter, iter->cpu_file))
4419 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004420
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004421 if (trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004422 /*
4423 * Always select as readable when in blocking mode
4424 */
4425 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004426 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004427 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004428 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004429}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004430
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004431static unsigned int
4432tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4433{
4434 struct trace_iterator *iter = filp->private_data;
4435
4436 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004437}
4438
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05004439/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004440static int tracing_wait_pipe(struct file *filp)
4441{
4442 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004443 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004444
4445 while (trace_empty(iter)) {
4446
4447 if (filp->f_flags & O_NONBLOCK)
4448 return -EAGAIN;
4449
4450
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004451 /*
Liu Bo250bfd32013-01-14 10:54:11 +08004452 * We block until we have read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004453 * We still block if tracing is disabled, but we have never
4454 * read anything. This allows a user to cat this file, and
4455 * then enable tracing. But after we have read something,
4456 * we give an EOF when tracing is again disabled.
4457 *
4458 * iter->pos will be 0 if we haven't read anything.
4459 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04004460 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004461 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004462
4463 mutex_unlock(&iter->mutex);
4464
Rabin Vincente30f53a2014-11-10 19:46:34 +01004465 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004466
4467 mutex_lock(&iter->mutex);
4468
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004469 if (ret)
4470 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004471 }
4472
4473 return 1;
4474}
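/*
 * Seen from userspace, the wait loop above is what makes trace_pipe
 * behave like a pipe: a plain read() sleeps until entries arrive,
 * O_NONBLOCK readers get -EAGAIN, and read() returns 0 (EOF) once
 * tracing is disabled after something has been read.  A sketch of a
 * consuming reader; the debugfs mount point is an assumption and may
 * differ per system:
 */
#if 0	/* illustrative sketch, not part of this file */
#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        ssize_t n;
        int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        for (;;) {
                n = read(fd, buf, sizeof(buf));	/* blocks until data */
                if (n < 0) {
                        if (errno == EINTR)
                                continue;
                        perror("read");
                        break;
                }
                if (n == 0)	/* EOF: tracing disabled after a read */
                        break;
                fwrite(buf, 1, n, stdout);
        }
        close(fd);
        return 0;
}
#endif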
4475
Steven Rostedtb3806b42008-05-12 21:20:46 +02004476/*
4477 * Consumer reader.
4478 */
4479static ssize_t
4480tracing_read_pipe(struct file *filp, char __user *ubuf,
4481 size_t cnt, loff_t *ppos)
4482{
4483 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004484 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004485
4486 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004487 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4488 if (sret != -EBUSY)
4489 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004490
Steven Rostedtf9520752009-03-02 14:04:40 -05004491 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004492
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004493 /*
4494 * Avoid more than one consumer on a single file descriptor
4495 * This is just a matter of trace coherency; the ring buffer itself
4496 * is protected.
4497 */
4498 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004499 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004500 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4501 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02004502 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02004503 }
4504
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004505waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004506 sret = tracing_wait_pipe(filp);
4507 if (sret <= 0)
4508 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004509
4510 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004511 if (trace_empty(iter)) {
4512 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02004513 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004514 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004515
4516 if (cnt >= PAGE_SIZE)
4517 cnt = PAGE_SIZE - 1;
4518
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004519 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004520 memset(&iter->seq, 0,
4521 sizeof(struct trace_iterator) -
4522 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04004523 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02004524 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004525
Lai Jiangshan4f535962009-05-18 19:35:34 +08004526 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004527 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05004528 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004529 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004530 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004531
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004532 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004533 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02004534 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004535 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004536 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004537 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01004538 if (ret != TRACE_TYPE_NO_CONSUME)
4539 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004540
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004541 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02004542 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01004543
4544 /*
4545 * Setting the full flag means we reached the trace_seq buffer
4546 * size, so we should have left via the partial-output condition
4547 * above. One of the trace_seq_* functions is not being used properly.
4548 */
4549 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4550 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004551 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004552 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004553 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02004554
Steven Rostedtb3806b42008-05-12 21:20:46 +02004555 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004556 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004557 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05004558 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004559
4560 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004561 * If there was nothing to send to the user, despite consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004562 * entries, go back to wait for more entries.
4563 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004564 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004565 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004566
Steven Rostedt107bad82008-05-12 21:21:01 +02004567out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004568 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004569
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004570 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004571}
4572
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004573static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4574 unsigned int idx)
4575{
4576 __free_page(spd->pages[idx]);
4577}
4578
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08004579static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004580 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004581 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05004582 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004583 .steal = generic_pipe_buf_steal,
4584 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004585};
4586
Steven Rostedt34cd4992009-02-09 12:06:29 -05004587static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004588tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004589{
4590 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004591 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004592 int ret;
4593
4594 /* Seq buffer is page-sized, exactly what we need. */
4595 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004596 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004597 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004598
4599 if (trace_seq_has_overflowed(&iter->seq)) {
4600 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004601 break;
4602 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004603
4604 /*
4605 * This should not be hit, because it should only
4606 * be set if the iter->seq overflowed. But check it
4607 * anyway to be safe.
4608 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05004609 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004610 iter->seq.seq.len = save_len;
4611 break;
4612 }
4613
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004614 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004615 if (rem < count) {
4616 rem = 0;
4617 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004618 break;
4619 }
4620
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08004621 if (ret != TRACE_TYPE_NO_CONSUME)
4622 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05004623 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05004624 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004625 rem = 0;
4626 iter->ent = NULL;
4627 break;
4628 }
4629 }
4630
4631 return rem;
4632}
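/*
 * tracing_fill_pipe_page() packs only whole records into a page by
 * snapshotting the output length before rendering each record and
 * rewinding to that snapshot when the record does not fit.  The same
 * save/try/rewind idea with snprintf() into a fixed page (the record
 * source and sizes are illustrative assumptions):
 */
#if 0	/* illustrative sketch, not part of this file */
#include <stdio.h>

#define PAGE_SZ 4096

/* Append whole records to page until one no longer fits. */
static size_t fill_page(char *page, const char **recs, int nr_recs)
{
        size_t len = 0;
        int i;

        for (i = 0; i < nr_recs; i++) {
                size_t save_len = len;	/* snapshot before this record */
                int n = snprintf(page + len, PAGE_SZ - len, "%s\n", recs[i]);

                if (n < 0 || (size_t)n >= PAGE_SZ - len) {
                        len = save_len;	/* rewind the partial record */
                        break;
                }
                len += n;
        }
        return len;	/* bytes actually used */
}
#endif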
4633
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004634static ssize_t tracing_splice_read_pipe(struct file *filp,
4635 loff_t *ppos,
4636 struct pipe_inode_info *pipe,
4637 size_t len,
4638 unsigned int flags)
4639{
Jens Axboe35f3d142010-05-20 10:43:18 +02004640 struct page *pages_def[PIPE_DEF_BUFFERS];
4641 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004642 struct trace_iterator *iter = filp->private_data;
4643 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02004644 .pages = pages_def,
4645 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004646 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02004647 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004648 .flags = flags,
4649 .ops = &tracing_pipe_buf_ops,
4650 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004651 };
4652 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004653 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004654 unsigned int i;
4655
Jens Axboe35f3d142010-05-20 10:43:18 +02004656 if (splice_grow_spd(pipe, &spd))
4657 return -ENOMEM;
4658
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004659 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004660
4661 if (iter->trace->splice_read) {
4662 ret = iter->trace->splice_read(iter, filp,
4663 ppos, pipe, len, flags);
4664 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004665 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004666 }
4667
4668 ret = tracing_wait_pipe(filp);
4669 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004670 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004671
Jason Wessel955b61e2010-08-05 09:22:23 -05004672 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004673 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004674 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004675 }
4676
Lai Jiangshan4f535962009-05-18 19:35:34 +08004677 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004678 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004679
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004680 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04004681 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004682 spd.pages[i] = alloc_page(GFP_KERNEL);
4683 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05004684 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004685
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004686 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004687
4688 /* Copy the data into the page, so we can start over. */
4689 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02004690 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004691 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004692 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004693 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004694 break;
4695 }
Jens Axboe35f3d142010-05-20 10:43:18 +02004696 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004697 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004698
Steven Rostedtf9520752009-03-02 14:04:40 -05004699 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004700 }
4701
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004702 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004703 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004704 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004705
4706 spd.nr_pages = i;
4707
Jens Axboe35f3d142010-05-20 10:43:18 +02004708 ret = splice_to_pipe(pipe, &spd);
4709out:
Eric Dumazet047fe362012-06-12 15:24:40 +02004710 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02004711 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004712
Steven Rostedt34cd4992009-02-09 12:06:29 -05004713out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004714 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02004715 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004716}
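/*
 * The splice path above lets a consumer move trace data into a pipe
 * without bouncing it through a userspace buffer.  A sketch of the
 * reading side with splice(2); at least one end must be a pipe, so run
 * it with stdout connected to one (e.g. "./reader | cat").  The path
 * and chunk length are illustrative assumptions:
 */
#if 0	/* illustrative sketch, not part of this file */
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
        ssize_t n;

        if (fd < 0) {
                perror("open");
                return 1;
        }
        for (;;) {
                /* Move up to 64K per call straight into the stdout pipe. */
                n = splice(fd, NULL, STDOUT_FILENO, NULL, 65536,
                           SPLICE_F_MOVE);
                if (n <= 0)
                        break;
        }
        close(fd);
        return 0;
}
#endif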
4717
Steven Rostedta98a3c32008-05-12 21:20:59 +02004718static ssize_t
4719tracing_entries_read(struct file *filp, char __user *ubuf,
4720 size_t cnt, loff_t *ppos)
4721{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004722 struct inode *inode = file_inode(filp);
4723 struct trace_array *tr = inode->i_private;
4724 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004725 char buf[64];
4726 int r = 0;
4727 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004728
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004729 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004730
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004731 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004732 int cpu, buf_size_same;
4733 unsigned long size;
4734
4735 size = 0;
4736 buf_size_same = 1;
4737 /* check if all cpu sizes are the same */
4738 for_each_tracing_cpu(cpu) {
4739 /* fill in the size from the first enabled cpu */
4740 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004741 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4742 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004743 buf_size_same = 0;
4744 break;
4745 }
4746 }
4747
4748 if (buf_size_same) {
4749 if (!ring_buffer_expanded)
4750 r = sprintf(buf, "%lu (expanded: %lu)\n",
4751 size >> 10,
4752 trace_buf_size >> 10);
4753 else
4754 r = sprintf(buf, "%lu\n", size >> 10);
4755 } else
4756 r = sprintf(buf, "X\n");
4757 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004758 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004759
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004760 mutex_unlock(&trace_types_lock);
4761
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004762 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4763 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004764}
4765
4766static ssize_t
4767tracing_entries_write(struct file *filp, const char __user *ubuf,
4768 size_t cnt, loff_t *ppos)
4769{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004770 struct inode *inode = file_inode(filp);
4771 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004772 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004773 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004774
Peter Huewe22fe9b52011-06-07 21:58:27 +02004775 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4776 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004777 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004778
4779 /* must have at least 1 entry */
4780 if (!val)
4781 return -EINVAL;
4782
Steven Rostedt1696b2b2008-11-13 00:09:35 -05004783 /* value is in KB */
4784 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004785 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004786 if (ret < 0)
4787 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004788
Jiri Olsacf8517c2009-10-23 19:36:16 -04004789 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004790
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004791 return cnt;
4792}
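/*
 * A minimal userspace sketch (not part of this file) of the buffer_size_kb
 * protocol implemented by tracing_entries_read()/tracing_entries_write()
 * above: reads report the per-cpu size in KB ("X" when per-cpu sizes
 * differ, "N (expanded: M)" before the ring buffer is expanded), writes
 * take a new size in KB. Assumes debugfs is mounted at /sys/kernel/debug.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int resize_trace_buffer(unsigned long kb)
{
	char buf[64];
	int fd, n;

	fd = open("/sys/kernel/debug/tracing/buffer_size_kb", O_RDWR);
	if (fd < 0)
		return -1;
	/* the kernel parses this with kstrtoul_from_user(..., 10, ...) */
	n = snprintf(buf, sizeof(buf), "%lu", kb);
	if (write(fd, buf, n) != n) {
		close(fd);
		return -1;
	}
	lseek(fd, 0, SEEK_SET);		/* read back the applied size */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("buffer_size_kb: %s", buf);
	}
	close(fd);
	return 0;
}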
Steven Rostedtbf5e6512008-11-10 21:46:00 -05004793
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004794static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004795tracing_total_entries_read(struct file *filp, char __user *ubuf,
4796 size_t cnt, loff_t *ppos)
4797{
4798 struct trace_array *tr = filp->private_data;
4799 char buf[64];
4800 int r, cpu;
4801 unsigned long size = 0, expanded_size = 0;
4802
4803 mutex_lock(&trace_types_lock);
4804 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004805 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004806 if (!ring_buffer_expanded)
4807 expanded_size += trace_buf_size >> 10;
4808 }
4809 if (ring_buffer_expanded)
4810 r = sprintf(buf, "%lu\n", size);
4811 else
4812 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4813 mutex_unlock(&trace_types_lock);
4814
4815 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4816}
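/*
 * Companion sketch for tracing_total_entries_read() above: the
 * buffer_total_size_kb file sums the per-cpu sizes and, until the ring
 * buffer has been expanded, appends "(expanded: N)" with the would-be
 * total. Hypothetical helper; same debugfs mount assumption as above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int read_total_size_kb(unsigned long *total, unsigned long *expanded)
{
	char buf[64];
	int fd, n;

	fd = open("/sys/kernel/debug/tracing/buffer_total_size_kb", O_RDONLY);
	if (fd < 0)
		return -1;
	n = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (n <= 0)
		return -1;
	buf[n] = '\0';
	*total = *expanded = 0;
	/* matches both "1408\n" and "4 (expanded: 1408)\n" */
	return sscanf(buf, "%lu (expanded: %lu)", total, expanded) >= 1 ? 0 : -1;
}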
4817
4818static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004819tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4820 size_t cnt, loff_t *ppos)
4821{
4822 /*
 4823	 * There is no need to read what the user has written; this function
4824 * is just to make sure that there is no error when "echo" is used
4825 */
4826
4827 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004828
4829 return cnt;
4830}
4831
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004832static int
4833tracing_free_buffer_release(struct inode *inode, struct file *filp)
4834{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004835 struct trace_array *tr = inode->i_private;
4836
Steven Rostedtcf30cf62011-06-14 22:44:07 -04004837	/* disable tracing? */
4838 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07004839 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004840 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004841 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004842
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004843 trace_array_put(tr);
4844
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004845 return 0;
4846}
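/*
 * Sketch of the free_buffer contract implemented above: the write side is
 * deliberately a no-op (so "echo > free_buffer" succeeds), and the work
 * happens on release(), which may stop tracing (TRACE_ITER_STOP_ON_FREE)
 * and shrinks the ring buffer to zero. Illustrative only; standard
 * debugfs mount point assumed.
 */
#include <fcntl.h>
#include <unistd.h>

static void drop_trace_buffers(void)
{
	int fd = open("/sys/kernel/debug/tracing/free_buffer", O_WRONLY);

	if (fd < 0)
		return;
	write(fd, "1", 1);	/* content is ignored by the kernel */
	close(fd);		/* this close is what frees the buffers */
}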
4847
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004848static ssize_t
4849tracing_mark_write(struct file *filp, const char __user *ubuf,
4850 size_t cnt, loff_t *fpos)
4851{
Steven Rostedtd696b582011-09-22 11:50:27 -04004852 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07004853 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04004854 struct ring_buffer_event *event;
4855 struct ring_buffer *buffer;
4856 struct print_entry *entry;
4857 unsigned long irq_flags;
4858 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004859 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04004860 int nr_pages = 1;
4861 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04004862 int offset;
4863 int size;
4864 int len;
4865 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004866 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004867
Steven Rostedtc76f0692008-11-07 22:36:02 -05004868 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004869 return -EINVAL;
4870
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07004871 if (!(trace_flags & TRACE_ITER_MARKERS))
4872 return -EINVAL;
4873
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004874 if (cnt > TRACE_BUF_SIZE)
4875 cnt = TRACE_BUF_SIZE;
4876
Steven Rostedtd696b582011-09-22 11:50:27 -04004877 /*
4878 * Userspace is injecting traces into the kernel trace buffer.
 4879	 * We want to be as non-intrusive as possible.
4880 * To do so, we do not want to allocate any special buffers
4881 * or take any locks, but instead write the userspace data
4882 * straight into the ring buffer.
4883 *
4884 * First we need to pin the userspace buffer into memory,
4885 * which, most likely it is, because it just referenced it.
4886 * But there's no guarantee that it is. By using get_user_pages_fast()
4887 * and kmap_atomic/kunmap_atomic() we can get access to the
4888 * pages directly. We then write the data directly into the
4889 * ring buffer.
4890 */
4891 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004892
Steven Rostedtd696b582011-09-22 11:50:27 -04004893 /* check if we cross pages */
4894 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4895 nr_pages = 2;
4896
4897 offset = addr & (PAGE_SIZE - 1);
4898 addr &= PAGE_MASK;
4899
4900 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4901 if (ret < nr_pages) {
4902 while (--ret >= 0)
4903 put_page(pages[ret]);
4904 written = -EFAULT;
4905 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004906 }
4907
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004908 for (i = 0; i < nr_pages; i++)
4909 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04004910
4911 local_save_flags(irq_flags);
4912 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07004913 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04004914 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4915 irq_flags, preempt_count());
4916 if (!event) {
4917 /* Ring buffer disabled, return as if not open for write */
4918 written = -EBADF;
4919 goto out_unlock;
4920 }
4921
4922 entry = ring_buffer_event_data(event);
4923 entry->ip = _THIS_IP_;
4924
4925 if (nr_pages == 2) {
4926 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004927 memcpy(&entry->buf, map_page[0] + offset, len);
4928 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04004929 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004930 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04004931
4932 if (entry->buf[cnt - 1] != '\n') {
4933 entry->buf[cnt] = '\n';
4934 entry->buf[cnt + 1] = '\0';
4935 } else
4936 entry->buf[cnt] = '\0';
4937
Steven Rostedt7ffbd482012-10-11 12:14:25 -04004938 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04004939
4940 written = cnt;
4941
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004942 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004943
Steven Rostedtd696b582011-09-22 11:50:27 -04004944 out_unlock:
Vikram Mulukutla72158532014-12-17 18:50:56 -08004945 for (i = nr_pages - 1; i >= 0; i--) {
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004946 kunmap_atomic(map_page[i]);
4947 put_page(pages[i]);
4948 }
Steven Rostedtd696b582011-09-22 11:50:27 -04004949 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004950 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004951}
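/*
 * Userspace counterpart to tracing_mark_write() above: writing a string
 * to trace_marker injects it into the ring buffer as a TRACE_PRINT event
 * (a '\n' is appended if missing, and writes longer than TRACE_BUF_SIZE
 * are truncated). A hedged sketch assuming debugfs at /sys/kernel/debug:
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int trace_marker_fd = -1;

static void trace_mark(const char *msg)
{
	if (trace_marker_fd < 0)
		trace_marker_fd = open("/sys/kernel/debug/tracing/trace_marker",
				       O_WRONLY);
	if (trace_marker_fd >= 0)
		write(trace_marker_fd, msg, strlen(msg));
}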
4952
Li Zefan13f16d22009-12-08 11:16:11 +08004953static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08004954{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004955 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004956 int i;
4957
4958 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08004959 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08004960 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004961 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4962 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08004963 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08004964
Li Zefan13f16d22009-12-08 11:16:11 +08004965 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08004966}
4967
Steven Rostedte1e232c2014-02-10 23:38:46 -05004968static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08004969{
Zhaolei5079f322009-08-25 16:12:56 +08004970 int i;
4971
Zhaolei5079f322009-08-25 16:12:56 +08004972 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4973 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4974 break;
4975 }
4976 if (i == ARRAY_SIZE(trace_clocks))
4977 return -EINVAL;
4978
Zhaolei5079f322009-08-25 16:12:56 +08004979 mutex_lock(&trace_types_lock);
4980
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004981 tr->clock_id = i;
4982
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004983 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08004984
David Sharp60303ed2012-10-11 16:27:52 -07004985 /*
 4986	 * The new clock may not be consistent with the previous clock.
4987 * Reset the buffer so that it doesn't have incomparable timestamps.
4988 */
Alexander Z Lam94571582013-08-02 18:36:16 -07004989 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004990
4991#ifdef CONFIG_TRACER_MAX_TRACE
4992 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4993 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07004994 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004995#endif
David Sharp60303ed2012-10-11 16:27:52 -07004996
Zhaolei5079f322009-08-25 16:12:56 +08004997 mutex_unlock(&trace_types_lock);
4998
Steven Rostedte1e232c2014-02-10 23:38:46 -05004999 return 0;
5000}
5001
5002static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5003 size_t cnt, loff_t *fpos)
5004{
5005 struct seq_file *m = filp->private_data;
5006 struct trace_array *tr = m->private;
5007 char buf[64];
5008 const char *clockstr;
5009 int ret;
5010
5011 if (cnt >= sizeof(buf))
5012 return -EINVAL;
5013
5014 if (copy_from_user(&buf, ubuf, cnt))
5015 return -EFAULT;
5016
5017 buf[cnt] = 0;
5018
5019 clockstr = strstrip(buf);
5020
5021 ret = tracing_set_clock(tr, clockstr);
5022 if (ret)
5023 return ret;
5024
Zhaolei5079f322009-08-25 16:12:56 +08005025 *fpos += cnt;
5026
5027 return cnt;
5028}
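/*
 * Sketch of the trace_clock protocol handled by tracing_clock_show() and
 * tracing_clock_write() above: a read lists the clocks with the current
 * one in brackets (e.g. "[local] global counter ..."), and writing one of
 * those names switches clocks, which also resets the buffers (see
 * tracing_set_clock()). Available names depend on the kernel build.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int set_trace_clock(const char *name)
{
	int fd, ret;

	fd = open("/sys/kernel/debug/tracing/trace_clock", O_WRONLY);
	if (fd < 0)
		return -1;
	ret = write(fd, name, strlen(name)) < 0 ? -1 : 0;
	close(fd);
	return ret;
}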
5029
Li Zefan13f16d22009-12-08 11:16:11 +08005030static int tracing_clock_open(struct inode *inode, struct file *file)
5031{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005032 struct trace_array *tr = inode->i_private;
5033 int ret;
5034
Li Zefan13f16d22009-12-08 11:16:11 +08005035 if (tracing_disabled)
5036 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005037
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005038 if (trace_array_get(tr))
5039 return -ENODEV;
5040
5041 ret = single_open(file, tracing_clock_show, inode->i_private);
5042 if (ret < 0)
5043 trace_array_put(tr);
5044
5045 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08005046}
5047
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005048struct ftrace_buffer_info {
5049 struct trace_iterator iter;
5050 void *spare;
5051 unsigned int read;
5052};
5053
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005054#ifdef CONFIG_TRACER_SNAPSHOT
5055static int tracing_snapshot_open(struct inode *inode, struct file *file)
5056{
Oleg Nesterov6484c712013-07-23 17:26:10 +02005057 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005058 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005059 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005060 int ret = 0;
5061
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005062 if (trace_array_get(tr) < 0)
5063 return -ENODEV;
5064
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005065 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02005066 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005067 if (IS_ERR(iter))
5068 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005069 } else {
5070 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005071 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005072 m = kzalloc(sizeof(*m), GFP_KERNEL);
5073 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005074 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005075 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5076 if (!iter) {
5077 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005078 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005079 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005080 ret = 0;
5081
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005082 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02005083 iter->trace_buffer = &tr->max_buffer;
5084 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005085 m->private = iter;
5086 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005087 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005088out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005089 if (ret < 0)
5090 trace_array_put(tr);
5091
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005092 return ret;
5093}
5094
5095static ssize_t
5096tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5097 loff_t *ppos)
5098{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005099 struct seq_file *m = filp->private_data;
5100 struct trace_iterator *iter = m->private;
5101 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005102 unsigned long val;
5103 int ret;
5104
5105 ret = tracing_update_buffers();
5106 if (ret < 0)
5107 return ret;
5108
5109 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5110 if (ret)
5111 return ret;
5112
5113 mutex_lock(&trace_types_lock);
5114
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005115 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005116 ret = -EBUSY;
5117 goto out;
5118 }
5119
5120 switch (val) {
5121 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005122 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5123 ret = -EINVAL;
5124 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005125 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005126 if (tr->allocated_snapshot)
5127 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005128 break;
5129 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005130/* Only allow per-cpu swap if the ring buffer supports it */
5131#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5132 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5133 ret = -EINVAL;
5134 break;
5135 }
5136#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005137 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005138 ret = alloc_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005139 if (ret < 0)
5140 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005141 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005142 local_irq_disable();
5143 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005144 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005145 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005146 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005147 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005148 local_irq_enable();
5149 break;
5150 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005151 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005152 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5153 tracing_reset_online_cpus(&tr->max_buffer);
5154 else
5155 tracing_reset(&tr->max_buffer, iter->cpu_file);
5156 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005157 break;
5158 }
5159
5160 if (ret >= 0) {
5161 *ppos += cnt;
5162 ret = cnt;
5163 }
5164out:
5165 mutex_unlock(&trace_types_lock);
5166 return ret;
5167}
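/*
 * The value protocol of the snapshot file, as implemented by
 * tracing_snapshot_write() above:
 *   echo 0 > snapshot    frees the snapshot buffer (if allocated)
 *   echo 1 > snapshot    allocates it if needed and takes a snapshot
 *   echo 2 > snapshot    (or higher) clears it without freeing
 * A minimal sketch of driving this from C (val must be a single digit
 * here); the write fails with -EBUSY while a tracer uses max_tr:
 */
#include <fcntl.h>
#include <unistd.h>

static int trace_snapshot_cmd(int val)
{
	char c = '0' + val;	/* assumes 0 <= val <= 9 */
	int fd, ret;

	fd = open("/sys/kernel/debug/tracing/snapshot", O_WRONLY);
	if (fd < 0)
		return -1;
	ret = write(fd, &c, 1) == 1 ? 0 : -1;
	close(fd);
	return ret;
}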
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005168
5169static int tracing_snapshot_release(struct inode *inode, struct file *file)
5170{
5171 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005172 int ret;
5173
5174 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005175
5176 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005177 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005178
5179 /* If write only, the seq_file is just a stub */
5180 if (m)
5181 kfree(m->private);
5182 kfree(m);
5183
5184 return 0;
5185}
5186
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005187static int tracing_buffers_open(struct inode *inode, struct file *filp);
5188static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5189 size_t count, loff_t *ppos);
5190static int tracing_buffers_release(struct inode *inode, struct file *file);
5191static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5192 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5193
5194static int snapshot_raw_open(struct inode *inode, struct file *filp)
5195{
5196 struct ftrace_buffer_info *info;
5197 int ret;
5198
5199 ret = tracing_buffers_open(inode, filp);
5200 if (ret < 0)
5201 return ret;
5202
5203 info = filp->private_data;
5204
5205 if (info->iter.trace->use_max_tr) {
5206 tracing_buffers_release(inode, filp);
5207 return -EBUSY;
5208 }
5209
5210 info->iter.snapshot = true;
5211 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5212
5213 return ret;
5214}
5215
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005216#endif /* CONFIG_TRACER_SNAPSHOT */
5217
5218
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005219static const struct file_operations tracing_thresh_fops = {
5220 .open = tracing_open_generic,
5221 .read = tracing_thresh_read,
5222 .write = tracing_thresh_write,
5223 .llseek = generic_file_llseek,
5224};
5225
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005226static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005227 .open = tracing_open_generic,
5228 .read = tracing_max_lat_read,
5229 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005230 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005231};
5232
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005233static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005234 .open = tracing_open_generic,
5235 .read = tracing_set_trace_read,
5236 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005237 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005238};
5239
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005240static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005241 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005242 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005243 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005244 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005245 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005246 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02005247};
5248
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005249static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005250 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005251 .read = tracing_entries_read,
5252 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005253 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005254 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005255};
5256
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005257static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005258 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005259 .read = tracing_total_entries_read,
5260 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005261 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005262};
5263
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005264static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005265 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005266 .write = tracing_free_buffer_write,
5267 .release = tracing_free_buffer_release,
5268};
5269
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005270static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005271 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005272 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005273 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005274 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005275};
5276
Zhaolei5079f322009-08-25 16:12:56 +08005277static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08005278 .open = tracing_clock_open,
5279 .read = seq_read,
5280 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005281 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08005282 .write = tracing_clock_write,
5283};
5284
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005285#ifdef CONFIG_TRACER_SNAPSHOT
5286static const struct file_operations snapshot_fops = {
5287 .open = tracing_snapshot_open,
5288 .read = seq_read,
5289 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005290 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005291 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005292};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005293
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005294static const struct file_operations snapshot_raw_fops = {
5295 .open = snapshot_raw_open,
5296 .read = tracing_buffers_read,
5297 .release = tracing_buffers_release,
5298 .splice_read = tracing_buffers_splice_read,
5299 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005300};
5301
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005302#endif /* CONFIG_TRACER_SNAPSHOT */
5303
Steven Rostedt2cadf912008-12-01 22:20:19 -05005304static int tracing_buffers_open(struct inode *inode, struct file *filp)
5305{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005306 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005307 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005308 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005309
5310 if (tracing_disabled)
5311 return -ENODEV;
5312
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005313 if (trace_array_get(tr) < 0)
5314 return -ENODEV;
5315
Steven Rostedt2cadf912008-12-01 22:20:19 -05005316 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005317 if (!info) {
5318 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005319 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005320 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005321
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005322 mutex_lock(&trace_types_lock);
5323
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005324 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005325 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05005326 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005327 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005328 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005329 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005330 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005331
5332 filp->private_data = info;
5333
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005334 tr->current_trace->ref++;
5335
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005336 mutex_unlock(&trace_types_lock);
5337
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005338 ret = nonseekable_open(inode, filp);
5339 if (ret < 0)
5340 trace_array_put(tr);
5341
5342 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005343}
5344
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005345static unsigned int
5346tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5347{
5348 struct ftrace_buffer_info *info = filp->private_data;
5349 struct trace_iterator *iter = &info->iter;
5350
5351 return trace_poll(iter, filp, poll_table);
5352}
5353
Steven Rostedt2cadf912008-12-01 22:20:19 -05005354static ssize_t
5355tracing_buffers_read(struct file *filp, char __user *ubuf,
5356 size_t count, loff_t *ppos)
5357{
5358 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005359 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005360 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005361 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005362
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005363 if (!count)
5364 return 0;
5365
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005366#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005367 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5368 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005369#endif
5370
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005371 if (!info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005372 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5373 iter->cpu_file);
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005374 if (!info->spare)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005375 return -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005376
Steven Rostedt2cadf912008-12-01 22:20:19 -05005377 /* Do we have previous read data to read? */
5378 if (info->read < PAGE_SIZE)
5379 goto read;
5380
Steven Rostedtb6273442013-02-28 13:44:11 -05005381 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005382 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005383 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005384 &info->spare,
5385 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005386 iter->cpu_file, 0);
5387 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05005388
5389 if (ret < 0) {
5390 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005391 if ((filp->f_flags & O_NONBLOCK))
5392 return -EAGAIN;
5393
Rabin Vincente30f53a2014-11-10 19:46:34 +01005394 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005395 if (ret)
5396 return ret;
5397
Steven Rostedtb6273442013-02-28 13:44:11 -05005398 goto again;
5399 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005400 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05005401 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005402
Steven Rostedt436fc282011-10-14 10:44:25 -04005403 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05005404 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05005405 size = PAGE_SIZE - info->read;
5406 if (size > count)
5407 size = count;
5408
5409 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005410 if (ret == size)
5411 return -EFAULT;
5412
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005413 size -= ret;
5414
Steven Rostedt2cadf912008-12-01 22:20:19 -05005415 *ppos += size;
5416 info->read += size;
5417
5418 return size;
5419}
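/*
 * Reading trace_pipe_raw, per tracing_buffers_read() above, yields whole
 * ring-buffer pages: each read() returns up to PAGE_SIZE bytes of raw
 * sub-buffer data, blocks when the buffer is empty unless O_NONBLOCK is
 * set (then fails with EAGAIN), and leftovers of a page are served from
 * info->spare on the next call. Hedged sketch assuming 4 KiB pages:
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void dump_raw_pages(int cpu)
{
	char path[128], page[4096];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/tracing/per_cpu/cpu%d/trace_pipe_raw", cpu);
	fd = open(path, O_RDONLY | O_NONBLOCK);
	if (fd < 0)
		return;
	/* each successful read hands back one ring-buffer page (or less) */
	while ((n = read(fd, page, sizeof(page))) > 0)
		fwrite(page, 1, n, stdout);
	if (n < 0 && errno != EAGAIN)
		perror("trace_pipe_raw");
	close(fd);
}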
5420
5421static int tracing_buffers_release(struct inode *inode, struct file *file)
5422{
5423 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005424 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005425
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005426 mutex_lock(&trace_types_lock);
5427
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005428 iter->tr->current_trace->ref--;
5429
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005430 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005431
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005432 if (info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005433 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005434 kfree(info);
5435
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005436 mutex_unlock(&trace_types_lock);
5437
Steven Rostedt2cadf912008-12-01 22:20:19 -05005438 return 0;
5439}
5440
5441struct buffer_ref {
5442 struct ring_buffer *buffer;
5443 void *page;
5444 int ref;
5445};
5446
5447static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5448 struct pipe_buffer *buf)
5449{
5450 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5451
5452 if (--ref->ref)
5453 return;
5454
5455 ring_buffer_free_read_page(ref->buffer, ref->page);
5456 kfree(ref);
5457 buf->private = 0;
5458}
5459
Steven Rostedt2cadf912008-12-01 22:20:19 -05005460static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5461 struct pipe_buffer *buf)
5462{
5463 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5464
5465 ref->ref++;
5466}
5467
5468/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005469static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005470 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005471 .confirm = generic_pipe_buf_confirm,
5472 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09005473 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005474 .get = buffer_pipe_buf_get,
5475};
5476
5477/*
5478 * Callback from splice_to_pipe(), if we need to release some pages
 5479 * at the end of the spd in case we errored out in filling the pipe.
5480 */
5481static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5482{
5483 struct buffer_ref *ref =
5484 (struct buffer_ref *)spd->partial[i].private;
5485
5486 if (--ref->ref)
5487 return;
5488
5489 ring_buffer_free_read_page(ref->buffer, ref->page);
5490 kfree(ref);
5491 spd->partial[i].private = 0;
5492}
5493
5494static ssize_t
5495tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5496 struct pipe_inode_info *pipe, size_t len,
5497 unsigned int flags)
5498{
5499 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005500 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02005501 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5502 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05005503 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005504 .pages = pages_def,
5505 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02005506 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005507 .flags = flags,
5508 .ops = &buffer_pipe_buf_ops,
5509 .spd_release = buffer_spd_release,
5510 };
5511 struct buffer_ref *ref;
Steven Rostedt93459c62009-04-29 00:23:13 -04005512 int entries, size, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01005513 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005514
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005515#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005516 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5517 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005518#endif
5519
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005520 if (splice_grow_spd(pipe, &spd))
5521 return -ENOMEM;
Jens Axboe35f3d142010-05-20 10:43:18 +02005522
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005523 if (*ppos & (PAGE_SIZE - 1))
5524 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005525
5526 if (len & (PAGE_SIZE - 1)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005527 if (len < PAGE_SIZE)
5528 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005529 len &= PAGE_MASK;
5530 }
5531
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005532 again:
5533 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005534 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04005535
Al Viroa786c062014-04-11 12:01:03 -04005536 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005537 struct page *page;
5538 int r;
5539
5540 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01005541 if (!ref) {
5542 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005543 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01005544 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005545
Steven Rostedt7267fa62009-04-29 00:16:21 -04005546 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005547 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005548 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005549 if (!ref->page) {
Rabin Vincent07906da2014-11-06 22:26:07 +01005550 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005551 kfree(ref);
5552 break;
5553 }
5554
5555 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005556 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005557 if (r < 0) {
Vaibhav Nagarnaik7ea59062011-05-03 17:56:42 -07005558 ring_buffer_free_read_page(ref->buffer, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005559 kfree(ref);
5560 break;
5561 }
5562
5563 /*
 5564		 * zero out any leftover data; this is going
 5565		 * to user land.
5566 */
5567 size = ring_buffer_page_len(ref->page);
5568 if (size < PAGE_SIZE)
5569 memset(ref->page + size, 0, PAGE_SIZE - size);
5570
5571 page = virt_to_page(ref->page);
5572
5573 spd.pages[i] = page;
5574 spd.partial[i].len = PAGE_SIZE;
5575 spd.partial[i].offset = 0;
5576 spd.partial[i].private = (unsigned long)ref;
5577 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005578 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04005579
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005580 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005581 }
5582
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005583 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005584 spd.nr_pages = i;
5585
5586 /* did we read anything? */
5587 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01005588 if (ret)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005589 return ret;
Rabin Vincent07906da2014-11-06 22:26:07 +01005590
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005591 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5592 return -EAGAIN;
5593
Rabin Vincente30f53a2014-11-10 19:46:34 +01005594 ret = wait_on_pipe(iter, true);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005595 if (ret)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005596 return ret;
Rabin Vincente30f53a2014-11-10 19:46:34 +01005597
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005598 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005599 }
5600
5601 ret = splice_to_pipe(pipe, &spd);
Eric Dumazet047fe362012-06-12 15:24:40 +02005602 splice_shrink_spd(&spd);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005603
Steven Rostedt2cadf912008-12-01 22:20:19 -05005604 return ret;
5605}
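/*
 * tracing_buffers_splice_read() above is the zero-copy extraction path:
 * userspace can splice whole ring-buffer pages through a pipe into a file
 * without copying them through a user buffer. A hedged sketch (error
 * handling trimmed); note the code above requires len to be a multiple
 * of PAGE_SIZE, assumed to be 4096 here:
 */
#define _GNU_SOURCE	/* for splice(2) */
#include <fcntl.h>
#include <unistd.h>

static int splice_raw_to_file(const char *raw_path, const char *out_path)
{
	int raw, out, p[2];
	ssize_t n;

	raw = open(raw_path, O_RDONLY | O_NONBLOCK);
	out = open(out_path, O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (raw < 0 || out < 0 || pipe(p) < 0)
		return -1;
	for (;;) {
		n = splice(raw, NULL, p[1], NULL, 4096, SPLICE_F_NONBLOCK);
		if (n <= 0)		/* EAGAIN once the buffer is empty */
			break;
		splice(p[0], NULL, out, NULL, n, 0);
	}
	close(p[0]); close(p[1]); close(raw); close(out);
	return 0;
}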
5606
5607static const struct file_operations tracing_buffers_fops = {
5608 .open = tracing_buffers_open,
5609 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005610 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005611 .release = tracing_buffers_release,
5612 .splice_read = tracing_buffers_splice_read,
5613 .llseek = no_llseek,
5614};
5615
Steven Rostedtc8d77182009-04-29 18:03:45 -04005616static ssize_t
5617tracing_stats_read(struct file *filp, char __user *ubuf,
5618 size_t count, loff_t *ppos)
5619{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005620 struct inode *inode = file_inode(filp);
5621 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005622 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005623 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005624 struct trace_seq *s;
5625 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005626 unsigned long long t;
5627 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005628
Li Zefane4f2d102009-06-15 10:57:28 +08005629 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005630 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01005631 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005632
5633 trace_seq_init(s);
5634
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005635 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005636 trace_seq_printf(s, "entries: %ld\n", cnt);
5637
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005638 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005639 trace_seq_printf(s, "overrun: %ld\n", cnt);
5640
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005641 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005642 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5643
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005644 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005645 trace_seq_printf(s, "bytes: %ld\n", cnt);
5646
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005647 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08005648 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005649 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08005650 usec_rem = do_div(t, USEC_PER_SEC);
5651 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5652 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005653
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005654 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08005655 usec_rem = do_div(t, USEC_PER_SEC);
5656 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5657 } else {
5658 /* counter or tsc mode for trace_clock */
5659 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005660 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08005661
5662 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005663 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08005664 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005665
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005666 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07005667 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5668
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005669 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05005670 trace_seq_printf(s, "read events: %ld\n", cnt);
5671
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005672 count = simple_read_from_buffer(ubuf, count, ppos,
5673 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04005674
5675 kfree(s);
5676
5677 return count;
5678}
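/*
 * The per_cpu/cpuN/stats file produced by tracing_stats_read() above is
 * line-oriented "name: value" text; the field names come straight from
 * the trace_seq_printf() calls (entries, overrun, commit overrun, bytes,
 * oldest event ts, now ts, dropped events, read events). A small parsing
 * sketch for one integer counter:
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long trace_stat(int cpu, const char *field)
{
	char path[128], line[128];
	size_t len = strlen(field);
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/tracing/per_cpu/cpu%d/stats", cpu);
	f = fopen(path, "r");
	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, field, len) && line[len] == ':') {
			val = atol(line + len + 1);
			break;
		}
	}
	fclose(f);
	return val;
}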
5679
5680static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005681 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005682 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005683 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005684 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005685};
5686
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005687#ifdef CONFIG_DYNAMIC_FTRACE
5688
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005689int __weak ftrace_arch_read_dyn_info(char *buf, int size)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005690{
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005691 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005692}
5693
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005694static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005695tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005696 size_t cnt, loff_t *ppos)
5697{
Steven Rostedta26a2a22008-10-31 00:03:22 -04005698 static char ftrace_dyn_info_buffer[1024];
5699 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005700 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005701 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04005702 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005703 int r;
5704
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005705 mutex_lock(&dyn_info_mutex);
5706 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005707
Steven Rostedta26a2a22008-10-31 00:03:22 -04005708 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005709 buf[r++] = '\n';
5710
5711 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5712
5713 mutex_unlock(&dyn_info_mutex);
5714
5715 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005716}
5717
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005718static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005719 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005720 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005721 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005722};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005723#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005724
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005725#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5726static void
5727ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005728{
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005729 tracing_snapshot();
5730}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005731
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005732static void
5733ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5734{
5735 unsigned long *count = (long *)data;
5736
5737 if (!*count)
5738 return;
5739
5740 if (*count != -1)
5741 (*count)--;
5742
5743 tracing_snapshot();
5744}
5745
5746static int
5747ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5748 struct ftrace_probe_ops *ops, void *data)
5749{
5750 long count = (long)data;
5751
5752 seq_printf(m, "%ps:", (void *)ip);
5753
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01005754 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005755
5756 if (count == -1)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01005757 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005758 else
5759 seq_printf(m, ":count=%ld\n", count);
5760
5761 return 0;
5762}
5763
5764static struct ftrace_probe_ops snapshot_probe_ops = {
5765 .func = ftrace_snapshot,
5766 .print = ftrace_snapshot_print,
5767};
5768
5769static struct ftrace_probe_ops snapshot_count_probe_ops = {
5770 .func = ftrace_count_snapshot,
5771 .print = ftrace_snapshot_print,
5772};
5773
5774static int
5775ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5776 char *glob, char *cmd, char *param, int enable)
5777{
5778 struct ftrace_probe_ops *ops;
5779 void *count = (void *)-1;
5780 char *number;
5781 int ret;
5782
5783 /* hash funcs only work with set_ftrace_filter */
5784 if (!enable)
5785 return -EINVAL;
5786
5787 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5788
5789 if (glob[0] == '!') {
5790 unregister_ftrace_function_probe_func(glob+1, ops);
5791 return 0;
5792 }
5793
5794 if (!param)
5795 goto out_reg;
5796
5797 number = strsep(&param, ":");
5798
5799 if (!strlen(number))
5800 goto out_reg;
5801
5802 /*
5803 * We use the callback data field (which is a pointer)
5804 * as our counter.
5805 */
5806 ret = kstrtoul(number, 0, (unsigned long *)&count);
5807 if (ret)
5808 return ret;
5809
5810 out_reg:
5811 ret = register_ftrace_function_probe(glob, ops, count);
5812
5813 if (ret >= 0)
5814 alloc_snapshot(&global_trace);
5815
5816 return ret < 0 ? ret : 0;
5817}
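/*
 * ftrace_trace_snapshot_callback() above parses the "snapshot" probe of
 * set_ftrace_filter, i.e. the documented usage:
 *   echo 'schedule:snapshot'    > set_ftrace_filter   snapshot every hit
 *   echo 'schedule:snapshot:5'  > set_ftrace_filter   only the first 5 hits
 *   echo '!schedule:snapshot'  >> set_ftrace_filter   remove the probe
 * A hedged C wrapper around that interface:
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int arm_snapshot_probe(const char *func, long count)
{
	char cmd[128];
	int fd, n, ret;

	fd = open("/sys/kernel/debug/tracing/set_ftrace_filter", O_WRONLY);
	if (fd < 0)
		return -1;
	if (count > 0)
		n = snprintf(cmd, sizeof(cmd), "%s:snapshot:%ld", func, count);
	else
		n = snprintf(cmd, sizeof(cmd), "%s:snapshot", func);
	ret = write(fd, cmd, n) == n ? 0 : -1;
	close(fd);
	return ret;
}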
5818
5819static struct ftrace_func_command ftrace_snapshot_cmd = {
5820 .name = "snapshot",
5821 .func = ftrace_trace_snapshot_callback,
5822};
5823
Tom Zanussi38de93a2013-10-24 08:34:18 -05005824static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005825{
5826 return register_ftrace_command(&ftrace_snapshot_cmd);
5827}
5828#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05005829static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005830#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005831
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05005832static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005833{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005834 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005835}
5836
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005837static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5838{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005839 struct dentry *d_tracer;
5840
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005841 if (tr->percpu_dir)
5842 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005843
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05005844 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05005845 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005846 return NULL;
5847
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005848 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005849
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005850 WARN_ONCE(!tr->percpu_dir,
5851 "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005852
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005853 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005854}
5855
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005856static struct dentry *
5857trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5858 void *data, long cpu, const struct file_operations *fops)
5859{
5860 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5861
5862 if (ret) /* See tracing_get_cpu() */
5863 ret->d_inode->i_cdev = (void *)(cpu + 1);
5864 return ret;
5865}
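/*
 * Decoding side of the (cpu + 1) trick above: trace_create_cpu_file()
 * stashes the cpu number in i_cdev, offset by one so that a NULL i_cdev
 * still means "not a per-cpu file". A sketch of the matching accessor
 * (the real tracing_get_cpu() lives in trace.h; this mirrors its logic):
 */
static inline int tracing_get_cpu_sketch(struct inode *inode)
{
	if (inode->i_cdev)	/* a per_cpu/cpuN file */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;	/* a top-level, all-cpus file */
}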
5866
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005867static void
5868tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005869{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005870 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005871 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04005872 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005873
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09005874 if (!d_percpu)
5875 return;
5876
Steven Rostedtdd49a382010-10-20 21:51:26 -04005877 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005878 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5879 if (!d_cpu) {
5880 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5881 return;
5882 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005883
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005884 /* per cpu trace_pipe */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005885 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02005886 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005887
5888 /* per cpu trace */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005889 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005890 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04005891
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005892 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005893 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005894
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005895 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005896 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005897
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005898 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005899 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005900
5901#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005902 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005903 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005904
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005905 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005906 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005907#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005908}
5909
Steven Rostedt60a11772008-05-12 21:20:44 +02005910#ifdef CONFIG_FTRACE_SELFTEST
5911/* Let selftest have access to static functions in this file */
5912#include "trace_selftest.c"
5913#endif
5914
Steven Rostedt577b7852009-02-26 23:43:05 -05005915struct trace_option_dentry {
5916 struct tracer_opt *opt;
5917 struct tracer_flags *flags;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005918 struct trace_array *tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05005919 struct dentry *entry;
5920};
5921
5922static ssize_t
5923trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5924 loff_t *ppos)
5925{
5926 struct trace_option_dentry *topt = filp->private_data;
5927 char *buf;
5928
5929 if (topt->flags->val & topt->opt->bit)
5930 buf = "1\n";
5931 else
5932 buf = "0\n";
5933
5934 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5935}
5936
5937static ssize_t
5938trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5939 loff_t *ppos)
5940{
5941 struct trace_option_dentry *topt = filp->private_data;
5942 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05005943 int ret;
5944
Peter Huewe22fe9b52011-06-07 21:58:27 +02005945 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5946 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05005947 return ret;
5948
Li Zefan8d18eaa2009-12-08 11:17:06 +08005949 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05005950 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08005951
5952 if (!!(topt->flags->val & topt->opt->bit) != val) {
5953 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005954 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05005955 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08005956 mutex_unlock(&trace_types_lock);
5957 if (ret)
5958 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05005959 }
5960
5961 *ppos += cnt;
5962
5963 return cnt;
5964}
5965
5966
5967static const struct file_operations trace_options_fops = {
5968 .open = tracing_open_generic,
5969 .read = trace_options_read,
5970 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005971 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05005972};
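/*
 * The options/<flag> files backed by trace_options_fops above accept only
 * "0" or "1" (anything else returns -EINVAL) and read back as "0\n" or
 * "1\n". A minimal toggle sketch; option names vary with the tracer:
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_trace_option(const char *opt, int on)
{
	char path[128];
	int fd, ret;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/tracing/options/%s", opt);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ret = write(fd, on ? "1" : "0", 1) == 1 ? 0 : -1;
	close(fd);
	return ret;
}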
5973
static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

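/**
 * trace_create_file - create a file in the tracing directory
 * @name:	name of the file
 * @mode:	permission mode of the file
 * @parent:	parent dentry, usually somewhere under "tracing"
 * @data:	data passed to the file's open routine via the inode
 * @fops:	file operations for the file
 *
 * A thin wrapper around debugfs_create_file() that warns on failure so
 * that callers do not have to check the result themselves.
 */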
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}

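/*
 * Find or create the "options" directory for a trace array, caching the
 * dentry in tr->options so repeated callers get the same directory.
 */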
static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

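/*
 * Create one file under "options" for a single tracer_opt, wiring up the
 * topt back-pointers so trace_options_read/write() can find the flag bit.
 */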
static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}

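/*
 * Build the per-tracer option files. The opts array is terminated by an
 * entry with a NULL name, which is why the counting loop below needs no
 * explicit length, and why kcalloc() allocates cnt + 1 entries: the extra
 * zeroed slot is the sentinel that destroy_trace_option_files() stops on.
 */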
static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++)
		debugfs_remove(topts[cnt].entry);

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}

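/*
 * Implementation of the "tracing_on" file: reading reports whether the
 * ring buffer is currently recording, and writing 0/1 turns recording
 * off/on, also invoking the current tracer's ->stop()/->start() callbacks.
 */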
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open = tracing_open_generic_tr,
	.read = rb_simple_read,
	.write = rb_simple_write,
	.release = tracing_release_generic_tr,
	.llseek = default_llseek,
};

struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

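/*
 * Allocate one trace_buffer: the ring buffer itself plus the per-cpu
 * trace_array_cpu bookkeeping. The "overwrite" trace flag decides whether
 * the ring buffer overwrites old events or stops when full.
 */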
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

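/*
 * Create a new trace_array instance: allocate the array and its buffers,
 * give it its own directory, event and option files, and add it to
 * ftrace_trace_arrays. Triggered from userspace by, for example:
 *
 *	mkdir /sys/kernel/debug/tracing/instances/foo
 *
 * (path assuming the usual debugfs mount point).
 */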
static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

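/*
 * Tear down an instance created above: refuse with -EBUSY while the
 * instance or its current tracer still holds references, otherwise unhook
 * it from the list and free its events, function files, directory and
 * buffers.
 */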
static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory cannot be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_remove_recursive() will
	 * also take the mutex. As the instances directory cannot be
	 * destroyed or changed in any other way, it is safe to unlock it,
	 * and let the dentry try. If two users try to remove the same dir
	 * at the same time, then instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup = simple_lookup,
	.mkdir = instance_mkdir,
	.rmdir = instance_rmdir,
};

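/*
 * Create the "instances" directory and hijack its inode operations so
 * that mkdir/rmdir inside it create and delete trace instances.
 */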
static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}

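/*
 * Populate a tracing directory (the top level one or an instance's) with
 * the standard control files: current_tracer, trace, trace_pipe, the
 * buffer size knobs, trace_marker, trace_clock, tracing_on and friends,
 * plus the per-cpu subdirectories.
 */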
static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	if (tr->dir)
		return tr->dir;

	if (WARN_ON(!debugfs_initialized()))
		return ERR_PTR(-ENODEV);

	tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return tr->dir;
}

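/*
 * Enum maps are emitted by the TRACE_DEFINE_ENUM() macro into a dedicated
 * section; the linker-provided start/stop symbols below bracket that
 * section so the maps can be registered at boot.
 */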
extern struct trace_enum_map *__start_ftrace_enum_maps[];
extern struct trace_enum_map *__stop_ftrace_enum_maps[];

static void __init trace_enum_init(void)
{
	trace_insert_enum_map(__start_ftrace_enum_maps, __stop_ftrace_enum_maps);
}

static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_enum_init();

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}

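/*
 * Panic and die notifiers: when the ftrace_dump_on_oops boot/sysctl knob
 * is set, dump the ring buffer contents to the console on a panic or an
 * oops, before the machine goes away.
 */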
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is set to a max of 1024, but we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

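/*
 * Dump the ring buffer to the console. The dump_running counter below
 * keeps concurrent dumpers out without taking a lock, which matters
 * because this can be reached from the panic and oops paths registered
 * above.
 */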
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

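/*
 * Boot-time setup: allocate the cpumasks, the global trace buffers and
 * the temp buffer used by event triggers, register the nop tracer and the
 * panic/die notifiers, and apply any options given on the kernel command
 * line.
 */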
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

void __init trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
	}
	tracer_alloc_buffers();
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer specified at boot lives in an init section.
	 * This function is called via late_initcall(). If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);