/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will peek into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer (such as trace_printk) could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
        { }
};

static struct tracer_flags dummy_tracer_flags = {
        .val = 0,
        .opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE         100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
        strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
        /* We are using ftrace early, expand it */
        ring_buffer_expanded = true;
        return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
        if (*str++ != '=' || !*str) {
                ftrace_dump_on_oops = DUMP_ALL;
                return 1;
        }

        if (!strcmp("orig_cpu", str)) {
                ftrace_dump_on_oops = DUMP_ORIG;
                return 1;
        }

        return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
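
/*
 * Illustrative usage (not part of the original source): the parser above
 * accepts either the bare flag or an "=orig_cpu" suffix, so on the kernel
 * command line one would write, e.g.:
 *
 *      ftrace_dump_on_oops           -> DUMP_ALL  (dump every CPU's buffer)
 *      ftrace_dump_on_oops=orig_cpu  -> DUMP_ORIG (dump only the oopsing CPU)
 */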

static int __init stop_trace_on_warning(char *str)
{
        __disable_trace_on_warning = 1;
        return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
        allocate_snapshot = true;
        /* We also need the main ring buffer expanded */
        ring_buffer_expanded = true;
        return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
        strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
        trace_boot_options = trace_boot_options_buf;
        return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
        strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
        trace_boot_clock = trace_boot_clock_buf;
        return 0;
}
__setup("trace_clock=", set_trace_boot_clock);


unsigned long long ns2usecs(cycle_t nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}
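
/*
 * Example (illustrative, not part of the original source): ns2usecs()
 * rounds to the nearest microsecond by adding half a microsecond before
 * the divide, so ns2usecs(1499) == 1 and ns2usecs(1500) == 2. do_div()
 * is used because native 64-bit division is not available on all 32-bit
 * architectures.
 */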

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
        struct trace_array *tr;
        int ret = -ENODEV;

        mutex_lock(&trace_types_lock);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                if (tr == this_tr) {
                        tr->ref++;
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&trace_types_lock);

        return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
        WARN_ON(!this_tr->ref);
        this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
        mutex_lock(&trace_types_lock);
        __trace_array_put(this_tr);
        mutex_unlock(&trace_types_lock);
}

int filter_check_discard(struct ftrace_event_file *file, void *rec,
                         struct ring_buffer *buffer,
                         struct ring_buffer_event *event)
{
        if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
            !filter_match_preds(file->filter, rec)) {
                ring_buffer_discard_commit(buffer, event);
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
                              struct ring_buffer *buffer,
                              struct ring_buffer_event *event)
{
        if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
            !filter_match_preds(call->filter, rec)) {
                ring_buffer_discard_commit(buffer, event);
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
        u64 ts;

        /* Early boot up does not have a buffer yet */
        if (!buf->buffer)
                return trace_clock_local();

        ts = ring_buffer_time_stamp(buf->buffer, cpu);
        ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

        return ts;
}

cycle_t ftrace_now(int cpu)
{
        return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
        /*
         * For quick access (irqsoff uses this in fast path), just
         * return the mirror variable of the state of the ring buffer.
         * It's a little racy, but we don't really care.
         */
        smp_rmb();
        return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                /* gain it for accessing the whole ring buffer. */
                down_write(&all_cpu_access_lock);
        } else {
                /* gain it for accessing a cpu ring buffer. */

                /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
                down_read(&all_cpu_access_lock);

                /* Secondly block other access to this @cpu ring buffer. */
                mutex_lock(&per_cpu(cpu_access_lock, cpu));
        }
}

static inline void trace_access_unlock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                up_write(&all_cpu_access_lock);
        } else {
                mutex_unlock(&per_cpu(cpu_access_lock, cpu));
                up_read(&all_cpu_access_lock);
        }
}

static inline void trace_access_lock_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
        (void)cpu;
        mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
        (void)cpu;
        mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
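
/*
 * Illustrative usage sketch (not part of the original source): a reader
 * consuming events from one CPU's buffer takes the rwsem in read mode plus
 * that CPU's mutex, while RING_BUFFER_ALL_CPUS takes the rwsem in write
 * mode and therefore excludes all per-cpu readers:
 *
 *      trace_access_lock(cpu);
 *      ... consume events, e.g. via ring_buffer_consume() ...
 *      trace_access_unlock(cpu);
 */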

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
        TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
        TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
        TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                ring_buffer_record_on(tr->trace_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races of where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 0;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
        tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:    The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct print_entry *entry;
        unsigned long irq_flags;
        int alloc;
        int pc;

        if (!(trace_flags & TRACE_ITER_PRINTK))
                return 0;

        pc = preempt_count();

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        alloc = sizeof(*entry) + size + 2; /* possible \n added */

        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
                                          irq_flags, pc);
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->ip = ip;

        memcpy(&entry->buf, str, size);

        /* Add a newline if necessary */
        if (entry->buf[size - 1] != '\n') {
                entry->buf[size] = '\n';
                entry->buf[size + 1] = '\0';
        } else
                entry->buf[size] = '\0';

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(buffer, irq_flags, 4, pc);

        return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:    The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct bputs_entry *entry;
        unsigned long irq_flags;
        int size = sizeof(struct bputs_entry);
        int pc;

        if (!(trace_flags & TRACE_ITER_PRINTK))
                return 0;

        pc = preempt_count();

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
                                          irq_flags, pc);
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->ip = ip;
        entry->str = str;

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(buffer, irq_flags, 4, pc);

        return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
        struct trace_array *tr = &global_trace;
        struct tracer *tracer = tr->current_trace;
        unsigned long flags;

        if (in_nmi()) {
                internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
                internal_trace_puts("*** snapshot is being ignored        ***\n");
                return;
        }

        if (!tr->allocated_snapshot) {
                internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
                internal_trace_puts("*** stopping trace here!   ***\n");
                tracing_off();
                return;
        }

        /* Note, snapshot can not be used when the tracer uses it */
        if (tracer->use_max_tr) {
                internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
                internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
                return;
        }

        local_irq_save(flags);
        update_max_tr(tr, current, smp_processor_id());
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
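
/*
 * Illustrative usage (not part of the original source): a debugging site
 * can capture the buffer at the moment a condition fires while tracing
 * continues, assuming the snapshot buffer was allocated beforehand:
 *
 *      if (suspicious_condition)
 *              tracing_snapshot();
 *
 * The captured trace can then be read from
 * /sys/kernel/debug/tracing/snapshot.
 */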

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
                                        struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
        int ret;

        if (!tr->allocated_snapshot) {

                /* allocate spare buffer */
                ret = resize_buffer_duplicate_size(&tr->max_buffer,
                                &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
                if (ret < 0)
                        return ret;

                tr->allocated_snapshot = true;
        }

        return 0;
}

static void free_snapshot(struct trace_array *tr)
{
        /*
         * We don't free the ring buffer. Instead, resize it because
         * the max_tr ring buffer has some state (e.g. ring->clock) and
         * we want to preserve it.
         */
        ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
        set_buffer_entries(&tr->max_buffer, 1);
        tracing_reset_online_cpus(&tr->max_buffer);
        tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
        struct trace_array *tr = &global_trace;
        int ret;

        ret = alloc_snapshot(tr);
        WARN_ON(ret < 0);

        return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
        int ret;

        ret = tracing_alloc_snapshot();
        if (ret < 0)
                return;

        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
        return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
        /* Give warning */
        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                ring_buffer_record_off(tr->trace_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races of where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 1;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
        tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
        if (__disable_trace_on_warning)
                tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                return ring_buffer_record_is_on(tr->trace_buffer.buffer);
        return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
        return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
        unsigned long buf_size;

        if (!str)
                return 0;
        buf_size = memparse(str, &str);
        /* nr_entries can not be zero */
        if (buf_size == 0)
                return 0;
        trace_buf_size = buf_size;
        return 1;
}
__setup("trace_buf_size=", set_buf_size);
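
/*
 * Illustrative usage (not part of the original source): memparse()
 * understands K/M/G suffixes, so booting with e.g.
 *
 *      trace_buf_size=1M
 *
 * requests a one-megabyte trace buffer (rounded to page size when the
 * ring buffer is actually allocated, per the comment above
 * TRACE_BUF_SIZE_DEFAULT).
 */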

static int __init set_tracing_thresh(char *str)
{
        unsigned long threshold;
        int ret;

        if (!str)
                return 0;
        ret = kstrtoul(str, 0, &threshold);
        if (ret < 0)
                return 0;
        tracing_thresh = threshold * 1000;
        return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
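
/*
 * Illustrative note (not part of the original source): the boot value is
 * given in microseconds and stored in nanoseconds, so "tracing_thresh=100"
 * stores 100000 in tracing_thresh. Latency tracers use this threshold to
 * decide which latencies are worth recording.
 */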

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
        "print-parent",
        "sym-offset",
        "sym-addr",
        "verbose",
        "raw",
        "hex",
        "bin",
        "block",
        "stacktrace",
        "trace_printk",
        "ftrace_preempt",
        "branch",
        "annotate",
        "userstacktrace",
        "sym-userobj",
        "printk-msg-only",
        "context-info",
        "latency-format",
        "sleep-time",
        "graph-time",
        "record-cmd",
        "overwrite",
        "disable_on_free",
        "irq-info",
        "markers",
        "function-trace",
        NULL
};

static struct {
        u64 (*func)(void);
        const char *name;
        int in_ns;              /* is this clock in nanoseconds? */
} trace_clocks[] = {
        { trace_clock_local,    "local",        1 },
        { trace_clock_global,   "global",       1 },
        { trace_clock_counter,  "counter",      0 },
        { trace_clock_jiffies,  "uptime",       1 },
        { trace_clock,          "perf",         1 },
        ARCH_TRACE_CLOCKS
};
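
/*
 * Illustrative usage (not part of the original source): the names above
 * are what user space writes to select a trace clock, e.g.:
 *
 *      echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * "local" is the fastest but is not guaranteed to be monotonic across
 * CPUs, while "global" is monotonic across CPUs at a higher per-timestamp
 * cost.
 */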

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
        memset(parser, 0, sizeof(*parser));

        parser->buffer = kmalloc(size, GFP_KERNEL);
        if (!parser->buffer)
                return 1;

        parser->size = size;
        return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
        kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
        size_t cnt, loff_t *ppos)
{
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!*ppos)
                trace_parser_clear(parser);

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;

        read++;
        cnt--;

        /*
         * The parser is not finished with the last write,
         * continue reading the user input without skipping spaces.
         */
        if (!parser->cont) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                /* only spaces were written */
                if (isspace(ch)) {
                        *ppos += read;
                        ret = read;
                        goto out;
                }

                parser->idx = 0;
        }

        /* read the non-space input */
        while (cnt && !isspace(ch)) {
                if (parser->idx < parser->size - 1)
                        parser->buffer[parser->idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        /* We either got finished input or we have to wait for another call. */
        if (isspace(ch)) {
                parser->buffer[parser->idx] = 0;
                parser->cont = false;
        } else if (parser->idx < parser->size - 1) {
                parser->cont = true;
                parser->buffer[parser->idx++] = ch;
        } else {
                ret = -EINVAL;
                goto out;
        }

        *ppos += read;
        ret = read;

out:
        return ret;
}
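
/*
 * Illustrative usage sketch (not part of the original source; names such
 * as do_something() are hypothetical): a write handler typically pulls
 * one whitespace-separated token per call, along these lines:
 *
 *      struct trace_parser parser;
 *      ssize_t read;
 *
 *      if (trace_parser_get_init(&parser, size))
 *              return -ENOMEM;
 *      read = trace_get_user(&parser, ubuf, cnt, ppos);
 *      if (read >= 0 && trace_parser_loaded(&parser))
 *              do_something(parser.buffer);  [token is NUL-terminated]
 *      trace_parser_put(&parser);
 */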

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
        int len;
        int ret;

        if (!cnt)
                return 0;

        if (s->len <= s->readpos)
                return -EBUSY;

        len = s->len - s->readpos;
        if (cnt > len)
                cnt = len;
        ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
        if (ret == cnt)
                return -EFAULT;

        cnt -= ret;

        s->readpos += cnt;
        return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
        int len;

        if (s->len <= s->readpos)
                return -EBUSY;

        len = s->len - s->readpos;
        if (cnt > len)
                cnt = len;
        memcpy(buf, s->buffer + s->readpos, cnt);

        s->readpos += cnt;
        return cnt;
}

unsigned long __read_mostly     tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_buffer *trace_buf = &tr->trace_buffer;
        struct trace_buffer *max_buf = &tr->max_buffer;
        struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
        struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

        max_buf->cpu = cpu;
        max_buf->time_start = data->preempt_timestamp;

        max_data->saved_latency = tr->max_latency;
        max_data->critical_start = data->critical_start;
        max_data->critical_end = data->critical_end;

        memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
        /*
         * If tsk == current, then use current_uid(), as that does not use
         * RCU. The irq tracer can be called out of RCU scope.
         */
        if (tsk == current)
                max_data->uid = current_uid();
        else
                max_data->uid = task_uid(tsk);

        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        max_data->policy = tsk->policy;
        max_data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct ring_buffer *buf;

        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());

        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&tr->max_lock);

        buf = tr->trace_buffer.buffer;
        tr->trace_buffer.buffer = tr->max_buffer.buffer;
        tr->max_buffer.buffer = buf;

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&tr->max_lock);
}
1056
1057/**
1058 * update_max_tr_single - only copy one trace over, and reset the rest
1059 * @tr - tracer
1060 * @tsk - task with the latency
1061 * @cpu - the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001062 *
1063 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001064 */
Ingo Molnare309b412008-05-12 21:20:51 +02001065void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001066update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1067{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001068 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001069
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001070 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001071 return;
1072
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001073 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001074 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001075 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001076 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001077 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001078 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001079
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001080 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001081
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001082 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001083
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001084 if (ret == -EBUSY) {
1085 /*
1086 * We failed to swap the buffer due to a commit taking
1087 * place on this CPU. We fail to record, but we reset
1088 * the max trace buffer (no one writes directly to it)
1089 * and flag that it failed.
1090 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001091 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001092 "Failed to swap buffers due to commit in progress\n");
1093 }
1094
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001095 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001096
1097 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001098 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001099}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001100#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001101
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001102static int wait_on_pipe(struct trace_iterator *iter)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001103{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001104 /* Iterators are static, they should be filled or empty */
1105 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001106 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001107
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001108 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001109}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
        struct trace_array *tr = &global_trace;
        struct tracer *saved_tracer = tr->current_trace;
        int ret;

        if (!type->selftest || tracing_selftest_disabled)
                return 0;

        /*
         * Run a selftest on this tracer.
         * Here we reset the trace buffer, and set the current
         * tracer to be this tracer. The tracer can then run some
         * internal tracing to verify that everything is in order.
         * If we fail, we do not register this tracer.
         */
        tracing_reset_online_cpus(&tr->trace_buffer);

        tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                /* If we expanded the buffers, make sure the max is expanded too */
                if (ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
                                           RING_BUFFER_ALL_CPUS);
                tr->allocated_snapshot = true;
        }
#endif

        /* the test is responsible for initializing and enabling */
        pr_info("Testing tracer %s: ", type->name);
        ret = type->selftest(type, tr);
        /* the test is responsible for resetting too */
        tr->current_trace = saved_tracer;
        if (ret) {
                printk(KERN_CONT "FAILED!\n");
                /* Add the warning after printing 'FAILED' */
                WARN_ON(1);
                return -1;
        }
        /* Only reset on passing, to avoid touching corrupted buffers */
        tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                tr->allocated_snapshot = false;

                /* Shrink the max buffer again */
                if (ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, 1,
                                           RING_BUFFER_ALL_CPUS);
        }
#endif

        printk(KERN_CONT "PASSED\n");
        return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
        return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
        struct tracer *t;
        int ret = 0;

        if (!type->name) {
                pr_info("Tracer must have a name\n");
                return -1;
        }

        if (strlen(type->name) >= MAX_TRACER_SIZE) {
                pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
                return -1;
        }

        mutex_lock(&trace_types_lock);

        tracing_selftest_running = true;

        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
                        pr_info("Tracer %s already registered\n",
                                type->name);
                        ret = -1;
                        goto out;
                }
        }

        if (!type->set_flag)
                type->set_flag = &dummy_set_flag;
        if (!type->flags)
                type->flags = &dummy_tracer_flags;
        else
                if (!type->flags->opts)
                        type->flags->opts = dummy_tracer_opt;

        ret = run_tracer_selftest(type);
        if (ret < 0)
                goto out;

        type->next = trace_types;
        trace_types = type;

 out:
        tracing_selftest_running = false;
        mutex_unlock(&trace_types_lock);

        if (ret || !default_bootup_tracer)
                goto out_unlock;

        if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
                goto out_unlock;

        printk(KERN_INFO "Starting tracer '%s'\n", type->name);
        /* Do we want this tracer to start on bootup? */
        tracing_set_tracer(&global_trace, type->name);
        default_bootup_tracer = NULL;
        /* disable other selftests, since this will break them. */
        tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
        printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
               type->name);
#endif

 out_unlock:
        return ret;
}
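
/*
 * Illustrative sketch (not part of the original source; the "nop_like"
 * names are hypothetical): a minimal tracer plugin only needs a name and
 * init/reset callbacks before registering, along these lines:
 *
 *      static struct tracer nop_like_tracer __read_mostly = {
 *              .name   = "nop_like",
 *              .init   = nop_like_init,
 *              .reset  = nop_like_reset,
 *      };
 *
 *      static int __init init_nop_like(void)
 *      {
 *              return register_tracer(&nop_like_tracer);
 *      }
 */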

void tracing_reset(struct trace_buffer *buf, int cpu)
{
        struct ring_buffer *buffer = buf->buffer;

        if (!buffer)
                return;

        ring_buffer_record_disable(buffer);

        /* Make sure all commits have finished */
        synchronize_sched();
        ring_buffer_reset_cpu(buffer, cpu);

        ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
        struct ring_buffer *buffer = buf->buffer;
        int cpu;

        if (!buffer)
                return;

        ring_buffer_record_disable(buffer);

        /* Make sure all commits have finished */
        synchronize_sched();

        buf->time_start = buffer_ftrace_now(buf, buf->cpu);

        for_each_online_cpu(cpu)
                ring_buffer_reset_cpu(buffer, cpu);

        ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
        struct trace_array *tr;

        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
                tracing_reset_online_cpus(&tr->max_buffer);
#endif
        }
}
1301
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001302#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001303#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001304static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001305struct saved_cmdlines_buffer {
1306 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1307 unsigned *map_cmdline_to_pid;
1308 unsigned cmdline_num;
1309 int cmdline_idx;
1310 char *saved_cmdlines;
1311};
1312static struct saved_cmdlines_buffer *savedcmd;
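/*
 * Illustrative sketch (hypothetical pids, not from the original source):
 * the two map arrays form a bidirectional pid <-> slot mapping. With two
 * tasks recorded into slots 0 and 1 of saved_cmdlines:
 *
 *	map_pid_to_cmdline[10] == 0	slot 0 == "bash"
 *	map_pid_to_cmdline[20] == 1	slot 1 == "sshd"
 *	map_cmdline_to_pid[0]  == 10
 *	map_cmdline_to_pid[1]  == 20
 *
 * When the ring of slots wraps, the reverse map identifies the stale pid
 * entry to invalidate before a slot is reused (see trace_save_cmdline()
 * below).
 */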
Steven Rostedt25b0b442008-05-12 21:21:00 +02001313
Steven Rostedt25b0b442008-05-12 21:21:00 +02001314/* temporary disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001315static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001316
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001317static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001318{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001319 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1320}
1321
1322static inline void set_cmdline(int idx, const char *cmdline)
1323{
1324 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1325}
1326
1327static int allocate_cmdlines_buffer(unsigned int val,
1328 struct saved_cmdlines_buffer *s)
1329{
1330 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1331 GFP_KERNEL);
1332 if (!s->map_cmdline_to_pid)
1333 return -ENOMEM;
1334
1335 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1336 if (!s->saved_cmdlines) {
1337 kfree(s->map_cmdline_to_pid);
1338 return -ENOMEM;
1339 }
1340
1341 s->cmdline_idx = 0;
1342 s->cmdline_num = val;
1343 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1344 sizeof(s->map_pid_to_cmdline));
1345 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1346 val * sizeof(*s->map_cmdline_to_pid));
1347
1348 return 0;
1349}
1350
1351static int trace_create_savedcmd(void)
1352{
1353 int ret;
1354
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001355 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001356 if (!savedcmd)
1357 return -ENOMEM;
1358
1359 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1360 if (ret < 0) {
1361 kfree(savedcmd);
1362 savedcmd = NULL;
1363 return -ENOMEM;
1364 }
1365
1366 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001367}
1368
Carsten Emdeb5130b12009-09-13 01:43:07 +02001369int is_tracing_stopped(void)
1370{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001371 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001372}
1373
Steven Rostedt0f048702008-11-05 16:05:44 -05001374/**
1375 * tracing_start - quick start of the tracer
1376 *
1377 * If tracing is enabled but was stopped by tracing_stop,
1378 * this will start the tracer back up.
1379 */
1380void tracing_start(void)
1381{
1382 struct ring_buffer *buffer;
1383 unsigned long flags;
1384
1385 if (tracing_disabled)
1386 return;
1387
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001388 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1389 if (--global_trace.stop_count) {
1390 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001391 /* Someone screwed up their debugging */
1392 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001393 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001394 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001395 goto out;
1396 }
1397
Steven Rostedta2f80712010-03-12 19:56:00 -05001398 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001399 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001400
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001401 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001402 if (buffer)
1403 ring_buffer_record_enable(buffer);
1404
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001405#ifdef CONFIG_TRACER_MAX_TRACE
1406 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001407 if (buffer)
1408 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001409#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001410
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001411 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001412
Steven Rostedt0f048702008-11-05 16:05:44 -05001413 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001414 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1415}
1416
1417static void tracing_start_tr(struct trace_array *tr)
1418{
1419 struct ring_buffer *buffer;
1420 unsigned long flags;
1421
1422 if (tracing_disabled)
1423 return;
1424
1425 /* If global, we need to also start the max tracer */
1426 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1427 return tracing_start();
1428
1429 raw_spin_lock_irqsave(&tr->start_lock, flags);
1430
1431 if (--tr->stop_count) {
1432 if (tr->stop_count < 0) {
1433 /* Someone screwed up their debugging */
1434 WARN_ON_ONCE(1);
1435 tr->stop_count = 0;
1436 }
1437 goto out;
1438 }
1439
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001440 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001441 if (buffer)
1442 ring_buffer_record_enable(buffer);
1443
1444 out:
1445 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001446}
1447
1448/**
1449 * tracing_stop - quick stop of the tracer
1450 *
1451 * Light weight way to stop tracing. Use in conjunction with
1452 * tracing_start.
1453 */
1454void tracing_stop(void)
1455{
1456 struct ring_buffer *buffer;
1457 unsigned long flags;
1458
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001459 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1460 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001461 goto out;
1462
Steven Rostedta2f80712010-03-12 19:56:00 -05001463 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001464 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001465
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001466 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001467 if (buffer)
1468 ring_buffer_record_disable(buffer);
1469
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001470#ifdef CONFIG_TRACER_MAX_TRACE
1471 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001472 if (buffer)
1473 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001474#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001475
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001476 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001477
Steven Rostedt0f048702008-11-05 16:05:44 -05001478 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001479 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1480}
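/*
 * Usage sketch (hypothetical caller): tracing_stop()/tracing_start()
 * pair up to freeze the ring buffer around a region of interest, and
 * the stop_count reference count makes the pairing nestable:
 *
 *	tracing_stop();
 *	ftrace_dump(DUMP_ALL);	// or otherwise examine the frozen buffer
 *	tracing_start();
 */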
1481
1482static void tracing_stop_tr(struct trace_array *tr)
1483{
1484 struct ring_buffer *buffer;
1485 unsigned long flags;
1486
1487 /* If global, we need to also stop the max tracer */
1488 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1489 return tracing_stop();
1490
1491 raw_spin_lock_irqsave(&tr->start_lock, flags);
1492 if (tr->stop_count++)
1493 goto out;
1494
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001495 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001496 if (buffer)
1497 ring_buffer_record_disable(buffer);
1498
1499 out:
1500 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001501}
1502
Ingo Molnare309b412008-05-12 21:20:51 +02001503void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001504
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001505static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001506{
Carsten Emdea635cf02009-03-18 09:00:41 +01001507 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001508
1509 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001510 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001511
1512 /*
1513 * It's not the end of the world if we don't get
1514 * the lock, but we also don't want to spin
1515 * nor do we want to disable interrupts,
1516 * so if we miss here, then better luck next time.
1517 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001518 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001519 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001520
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001521 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001522 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001523 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001524
Carsten Emdea635cf02009-03-18 09:00:41 +01001525 /*
1526 * Check whether the cmdline buffer at idx has a pid
1527 * mapped. We are going to overwrite that entry so we
1528 * need to clear the map_pid_to_cmdline. Otherwise we
1529 * would read the new comm for the old pid.
1530 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001531 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001532 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001533 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001534
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001535 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1536 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001537
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001538 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001539 }
1540
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001541 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001542
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001543 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001544
1545 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001546}
1547
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001548static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001549{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001550 unsigned map;
1551
Steven Rostedt4ca530852009-03-16 19:20:15 -04001552 if (!pid) {
1553 strcpy(comm, "<idle>");
1554 return;
1555 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001556
Steven Rostedt74bf4072010-01-25 15:11:53 -05001557 if (WARN_ON_ONCE(pid < 0)) {
1558 strcpy(comm, "<XXX>");
1559 return;
1560 }
1561
Steven Rostedt4ca530852009-03-16 19:20:15 -04001562 if (pid > PID_MAX_DEFAULT) {
1563 strcpy(comm, "<...>");
1564 return;
1565 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001566
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001567 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001568 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001569 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001570 else
1571 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001572}
1573
1574void trace_find_cmdline(int pid, char comm[])
1575{
1576 preempt_disable();
1577 arch_spin_lock(&trace_cmdline_lock);
1578
1579 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001580
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001581 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001582 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001583}
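/*
 * Example use (a sketch of what output code does elsewhere; "entry" and
 * "s" stand in for a trace entry and its trace_seq): resolve a recorded
 * pid back to a command name using a TASK_COMM_LEN buffer:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *	trace_seq_printf(s, "%16s-%-5d", comm, entry->pid);
 */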
1584
Ingo Molnare309b412008-05-12 21:20:51 +02001585void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001586{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001587 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001588 return;
1589
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001590 if (!__this_cpu_read(trace_cmdline_save))
1591 return;
1592
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001593 if (trace_save_cmdline(tsk))
1594 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001595}
1596
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001597void
Steven Rostedt38697052008-10-01 13:14:09 -04001598tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1599 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001600{
1601 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001602
Steven Rostedt777e2082008-09-29 23:02:42 -04001603 entry->preempt_count = pc & 0xff;
1604 entry->pid = (tsk) ? tsk->pid : 0;
1605 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001606#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001607 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001608#else
1609 TRACE_FLAG_IRQS_NOSUPPORT |
1610#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001611 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1612 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001613 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1614 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001615}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001616EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
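/*
 * Sketch of the typical call pattern (it mirrors trace_vbprintk() below):
 * the caller samples the irq flags and preempt count at the event site
 * and passes them in, so the entry records the context it fired from:
 *
 *	unsigned long flags;
 *	int pc = preempt_count();
 *
 *	local_save_flags(flags);
 *	tracing_generic_entry_update(ent, flags, pc);
 */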
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001617
Steven Rostedte77405a2009-09-02 14:17:06 -04001618struct ring_buffer_event *
1619trace_buffer_lock_reserve(struct ring_buffer *buffer,
1620 int type,
1621 unsigned long len,
1622 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001623{
1624 struct ring_buffer_event *event;
1625
Steven Rostedte77405a2009-09-02 14:17:06 -04001626 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001627 if (event != NULL) {
1628 struct trace_entry *ent = ring_buffer_event_data(event);
1629
1630 tracing_generic_entry_update(ent, flags, pc);
1631 ent->type = type;
1632 }
1633
1634 return event;
1635}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001636
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001637void
1638__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1639{
1640 __this_cpu_write(trace_cmdline_save, true);
1641 ring_buffer_unlock_commit(buffer, event);
1642}
1643
Steven Rostedte77405a2009-09-02 14:17:06 -04001644static inline void
1645__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1646 struct ring_buffer_event *event,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001647 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001648{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001649 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001650
Steven Rostedte77405a2009-09-02 14:17:06 -04001651 ftrace_trace_stack(buffer, flags, 6, pc);
1652 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001653}
1654
Steven Rostedte77405a2009-09-02 14:17:06 -04001655void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1656 struct ring_buffer_event *event,
1657 unsigned long flags, int pc)
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001658{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001659 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001660}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001661EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001662
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001663static struct ring_buffer *temp_buffer;
1664
Steven Rostedtef5580d2009-02-27 19:38:04 -05001665struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001666trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1667 struct ftrace_event_file *ftrace_file,
1668 int type, unsigned long len,
1669 unsigned long flags, int pc)
1670{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001671 struct ring_buffer_event *entry;
1672
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001673 *current_rb = ftrace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001674 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001675 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001676 /*
1677 * If tracing is off, but we have triggers enabled
1678 * we still need to look at the event data. Use the temp_buffer
1679	 * to store the trace event for the trigger to use. It's recursion
1680	 * safe and will not be recorded anywhere.
1681 */
1682 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1683 *current_rb = temp_buffer;
1684 entry = trace_buffer_lock_reserve(*current_rb,
1685 type, len, flags, pc);
1686 }
1687 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001688}
1689EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1690
1691struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001692trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1693 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001694 unsigned long flags, int pc)
1695{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001696 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001697 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001698 type, len, flags, pc);
1699}
Steven Rostedt94487d62009-05-05 19:22:53 -04001700EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001701
Steven Rostedte77405a2009-09-02 14:17:06 -04001702void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1703 struct ring_buffer_event *event,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001704 unsigned long flags, int pc)
1705{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001706 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001707}
Steven Rostedt94487d62009-05-05 19:22:53 -04001708EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001709
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001710void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1711 struct ring_buffer_event *event,
1712 unsigned long flags, int pc,
1713 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001714{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001715 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001716
1717 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1718 ftrace_trace_userstack(buffer, flags, pc);
1719}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001720EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001721
Steven Rostedte77405a2009-09-02 14:17:06 -04001722void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1723 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001724{
Steven Rostedte77405a2009-09-02 14:17:06 -04001725 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001726}
Steven Rostedt12acd472009-04-17 16:01:56 -04001727EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001728
Ingo Molnare309b412008-05-12 21:20:51 +02001729void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001730trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001731 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1732 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001733{
Tom Zanussie1112b42009-03-31 00:48:49 -05001734 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001735 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001736 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001737 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001738
Steven Rostedtd7690412008-10-01 00:29:53 -04001739 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001740 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001741 return;
1742
Steven Rostedte77405a2009-09-02 14:17:06 -04001743 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001744 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001745 if (!event)
1746 return;
1747 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001748 entry->ip = ip;
1749 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001750
Tom Zanussif306cc82013-10-24 08:34:17 -05001751 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001752 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001753}
1754
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001755#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001756
1757#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1758struct ftrace_stack {
1759 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1760};
1761
1762static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1763static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1764
Steven Rostedte77405a2009-09-02 14:17:06 -04001765static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001766 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001767 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001768{
Tom Zanussie1112b42009-03-31 00:48:49 -05001769 struct ftrace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001770 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001771 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001772 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001773 int use_stack;
1774 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001775
1776 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001777 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001778
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001779 /*
1780	 * Since events can happen in NMIs there's no safe way to
1781	 * use the per cpu ftrace_stacks. We reserve it, and if an
1782	 * interrupt or NMI comes in, it will just have to use the
1783	 * default FTRACE_STACK_ENTRIES sized stack in the entry itself.
1784 */
1785 preempt_disable_notrace();
1786
Shan Wei82146522012-11-19 13:21:01 +08001787 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001788 /*
1789 * We don't need any atomic variables, just a barrier.
1790 * If an interrupt comes in, we don't care, because it would
1791 * have exited and put the counter back to what we want.
1792 * We just need a barrier to keep gcc from moving things
1793 * around.
1794 */
1795 barrier();
1796 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05001797 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001798 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1799
1800 if (regs)
1801 save_stack_trace_regs(regs, &trace);
1802 else
1803 save_stack_trace(&trace);
1804
1805 if (trace.nr_entries > size)
1806 size = trace.nr_entries;
1807 } else
1808 /* From now on, use_stack is a boolean */
1809 use_stack = 0;
1810
1811 size *= sizeof(unsigned long);
1812
1813 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1814 sizeof(*entry) + size, flags, pc);
1815 if (!event)
1816 goto out;
1817 entry = ring_buffer_event_data(event);
1818
1819 memset(&entry->caller, 0, size);
1820
1821 if (use_stack)
1822 memcpy(&entry->caller, trace.entries,
1823 trace.nr_entries * sizeof(unsigned long));
1824 else {
1825 trace.max_entries = FTRACE_STACK_ENTRIES;
1826 trace.entries = entry->caller;
1827 if (regs)
1828 save_stack_trace_regs(regs, &trace);
1829 else
1830 save_stack_trace(&trace);
1831 }
1832
1833 entry->size = trace.nr_entries;
1834
Tom Zanussif306cc82013-10-24 08:34:17 -05001835 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001836 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001837
1838 out:
1839 /* Again, don't let gcc optimize things here */
1840 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001841 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001842 preempt_enable_notrace();
1843
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001844}
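/*
 * Worked example (hypothetical scenario): a tracepoint on this cpu is
 * inside __ftrace_trace_stack() using the large per-cpu ftrace_stack
 * (reserve == 1) when an NMI fires and records a stack of its own. The
 * NMI sees reserve == 2, so it skips the shared per-cpu stack and saves
 * at most FTRACE_STACK_ENTRIES entries straight into its own ring
 * buffer entry; the two never stomp on each other's calls[] array.
 */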
1845
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001846void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1847 int skip, int pc, struct pt_regs *regs)
1848{
1849 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1850 return;
1851
1852 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1853}
1854
Steven Rostedte77405a2009-09-02 14:17:06 -04001855void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1856 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001857{
1858 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1859 return;
1860
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001861 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001862}
1863
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001864void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1865 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001866{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001867 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001868}
1869
Steven Rostedt03889382009-12-11 09:48:22 -05001870/**
1871 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001872 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001873 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001874void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001875{
1876 unsigned long flags;
1877
1878 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001879 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001880
1881 local_save_flags(flags);
1882
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001883 /*
1884	 * Skip 3 more frames; that seems to get us to the caller
1885	 * of this function.
1886 */
1887 skip += 3;
1888 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1889 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001890}
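/*
 * Example (hypothetical call site): dropping this into a suspect code
 * path records a backtrace into the ring buffer instead of spamming
 * the console:
 *
 *	trace_dump_stack(0);
 */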
1891
Steven Rostedt91e86e52010-11-10 12:56:12 +01001892static DEFINE_PER_CPU(int, user_stack_count);
1893
Steven Rostedte77405a2009-09-02 14:17:06 -04001894void
1895ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001896{
Tom Zanussie1112b42009-03-31 00:48:49 -05001897 struct ftrace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001898 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001899 struct userstack_entry *entry;
1900 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001901
1902 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1903 return;
1904
Steven Rostedtb6345872010-03-12 20:03:30 -05001905 /*
1906	 * NMIs can not handle page faults, even with fixups.
1907	 * Saving the user stack can (and often does) fault.
1908 */
1909 if (unlikely(in_nmi()))
1910 return;
1911
Steven Rostedt91e86e52010-11-10 12:56:12 +01001912 /*
1913 * prevent recursion, since the user stack tracing may
1914 * trigger other kernel events.
1915 */
1916 preempt_disable();
1917 if (__this_cpu_read(user_stack_count))
1918 goto out;
1919
1920 __this_cpu_inc(user_stack_count);
1921
Steven Rostedte77405a2009-09-02 14:17:06 -04001922 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001923 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001924 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001925 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001926 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001927
Steven Rostedt48659d32009-09-11 11:36:23 -04001928 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001929 memset(&entry->caller, 0, sizeof(entry->caller));
1930
1931 trace.nr_entries = 0;
1932 trace.max_entries = FTRACE_STACK_ENTRIES;
1933 trace.skip = 0;
1934 trace.entries = entry->caller;
1935
1936 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001937 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001938 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001939
Li Zefan1dbd1952010-12-09 15:47:56 +08001940 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001941 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001942 out:
1943 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001944}
1945
Hannes Eder4fd27352009-02-10 19:44:12 +01001946#ifdef UNUSED
1947static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001948{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001949 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001950}
Hannes Eder4fd27352009-02-10 19:44:12 +01001951#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001952
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001953#endif /* CONFIG_STACKTRACE */
1954
Steven Rostedt07d777f2011-09-22 14:01:55 -04001955/* created for use with alloc_percpu */
1956struct trace_buffer_struct {
1957 char buffer[TRACE_BUF_SIZE];
1958};
1959
1960static struct trace_buffer_struct *trace_percpu_buffer;
1961static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1962static struct trace_buffer_struct *trace_percpu_irq_buffer;
1963static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1964
1965/*
1966	 * The buffer used depends on the context. There is a per cpu
1967	 * buffer for normal context, softirq context, hard irq context and
1968	 * for NMI context. This allows for lockless recording.
1969 *
1970 * Note, if the buffers failed to be allocated, then this returns NULL
1971 */
1972static char *get_trace_buf(void)
1973{
1974 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001975
1976 /*
1977 * If we have allocated per cpu buffers, then we do not
1978 * need to do any locking.
1979 */
1980 if (in_nmi())
1981 percpu_buffer = trace_percpu_nmi_buffer;
1982 else if (in_irq())
1983 percpu_buffer = trace_percpu_irq_buffer;
1984 else if (in_softirq())
1985 percpu_buffer = trace_percpu_sirq_buffer;
1986 else
1987 percpu_buffer = trace_percpu_buffer;
1988
1989 if (!percpu_buffer)
1990 return NULL;
1991
Shan Weid8a03492012-11-13 09:53:04 +08001992 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001993}
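/*
 * Usage sketch (this is what trace_vbprintk()/__trace_array_vprintk()
 * below do): grab the context-appropriate per-cpu buffer and format
 * into it; no locking is needed since each context level has its own
 * buffer:
 *
 *	char *tbuffer = get_trace_buf();
 *
 *	if (tbuffer)
 *		len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
 */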
1994
1995static int alloc_percpu_trace_buffer(void)
1996{
1997 struct trace_buffer_struct *buffers;
1998 struct trace_buffer_struct *sirq_buffers;
1999 struct trace_buffer_struct *irq_buffers;
2000 struct trace_buffer_struct *nmi_buffers;
2001
2002 buffers = alloc_percpu(struct trace_buffer_struct);
2003 if (!buffers)
2004 goto err_warn;
2005
2006 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2007 if (!sirq_buffers)
2008 goto err_sirq;
2009
2010 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2011 if (!irq_buffers)
2012 goto err_irq;
2013
2014 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2015 if (!nmi_buffers)
2016 goto err_nmi;
2017
2018 trace_percpu_buffer = buffers;
2019 trace_percpu_sirq_buffer = sirq_buffers;
2020 trace_percpu_irq_buffer = irq_buffers;
2021 trace_percpu_nmi_buffer = nmi_buffers;
2022
2023 return 0;
2024
2025 err_nmi:
2026 free_percpu(irq_buffers);
2027 err_irq:
2028 free_percpu(sirq_buffers);
2029 err_sirq:
2030 free_percpu(buffers);
2031 err_warn:
2032 WARN(1, "Could not allocate percpu trace_printk buffer");
2033 return -ENOMEM;
2034}
2035
Steven Rostedt81698832012-10-11 10:15:05 -04002036static int buffers_allocated;
2037
Steven Rostedt07d777f2011-09-22 14:01:55 -04002038void trace_printk_init_buffers(void)
2039{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002040 if (buffers_allocated)
2041 return;
2042
2043 if (alloc_percpu_trace_buffer())
2044 return;
2045
Steven Rostedt2184db42014-05-28 13:14:40 -04002046 /* trace_printk() is for debug use only. Don't use it in production. */
2047
2048 pr_warning("\n**********************************************************\n");
2049 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2050 pr_warning("** **\n");
2051 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2052 pr_warning("** **\n");
2053 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2054	pr_warning("** unsafe for production use. **\n");
2055 pr_warning("** **\n");
2056 pr_warning("** If you see this message and you are not debugging **\n");
2057 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2058 pr_warning("** **\n");
2059 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2060 pr_warning("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002061
Steven Rostedtb382ede62012-10-10 21:44:34 -04002062 /* Expand the buffers to set size */
2063 tracing_update_buffers();
2064
Steven Rostedt07d777f2011-09-22 14:01:55 -04002065 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002066
2067 /*
2068 * trace_printk_init_buffers() can be called by modules.
2069 * If that happens, then we need to start cmdline recording
2070 * directly here. If the global_trace.buffer is already
2071 * allocated here, then this was called by module code.
2072 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002073 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002074 tracing_start_cmdline_record();
2075}
2076
2077void trace_printk_start_comm(void)
2078{
2079 /* Start tracing comms if trace printk is set */
2080 if (!buffers_allocated)
2081 return;
2082 tracing_start_cmdline_record();
2083}
2084
2085static void trace_printk_start_stop_comm(int enabled)
2086{
2087 if (!buffers_allocated)
2088 return;
2089
2090 if (enabled)
2091 tracing_start_cmdline_record();
2092 else
2093 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002094}
2095
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002096/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002097 * trace_vbprintk - write a binary message to the tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002098 * @ip: the address of the caller, recorded with the event
 * @fmt: the printf format; only a pointer to it is stored in the event
 * @args: the arguments for @fmt, packed in binary form by vbin_printf()
2099 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002100int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002101{
Tom Zanussie1112b42009-03-31 00:48:49 -05002102 struct ftrace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002103 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002104 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002105 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002106 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002107 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002108 char *tbuffer;
2109 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002110
2111 if (unlikely(tracing_selftest_running || tracing_disabled))
2112 return 0;
2113
2114 /* Don't pollute graph traces with trace_vprintk internals */
2115 pause_graph_tracing();
2116
2117 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002118 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002119
Steven Rostedt07d777f2011-09-22 14:01:55 -04002120 tbuffer = get_trace_buf();
2121 if (!tbuffer) {
2122 len = 0;
2123 goto out;
2124 }
2125
2126 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2127
2128 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002129 goto out;
2130
Steven Rostedt07d777f2011-09-22 14:01:55 -04002131 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002132 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002133 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002134 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2135 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002136 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002137 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002138 entry = ring_buffer_event_data(event);
2139 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002140 entry->fmt = fmt;
2141
Steven Rostedt07d777f2011-09-22 14:01:55 -04002142 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002143 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002144 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05002145 ftrace_trace_stack(buffer, flags, 6, pc);
2146 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002147
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002148out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002149 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002150 unpause_graph_tracing();
2151
2152 return len;
2153}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002154EXPORT_SYMBOL_GPL(trace_vbprintk);
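/*
 * Sketch of the usual entry point (assuming the trace_printk() macro
 * from linux/kernel.h): a call with a constant format and arguments is
 * routed through __trace_bprintk(), which lands here with the format
 * pointer and a va_list of the binary arguments:
 *
 *	trace_printk("wakeup: cpu=%d prio=%d\n", cpu, prio);
 */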
2155
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002156static int
2157__trace_array_vprintk(struct ring_buffer *buffer,
2158 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002159{
Tom Zanussie1112b42009-03-31 00:48:49 -05002160 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002161 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002162 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002163 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002164 unsigned long flags;
2165 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002166
2167 if (tracing_disabled || tracing_selftest_running)
2168 return 0;
2169
Steven Rostedt07d777f2011-09-22 14:01:55 -04002170 /* Don't pollute graph traces with trace_vprintk internals */
2171 pause_graph_tracing();
2172
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002173 pc = preempt_count();
2174 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002175
Steven Rostedt07d777f2011-09-22 14:01:55 -04002176
2177 tbuffer = get_trace_buf();
2178 if (!tbuffer) {
2179 len = 0;
2180 goto out;
2181 }
2182
2183 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2184 if (len > TRACE_BUF_SIZE)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002185 goto out;
2186
Steven Rostedt07d777f2011-09-22 14:01:55 -04002187 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002188 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002189 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002190 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002191 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002192 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002193 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002194 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002195
Steven Rostedt07d777f2011-09-22 14:01:55 -04002196 memcpy(&entry->buf, tbuffer, len);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002197 entry->buf[len] = '\0';
Tom Zanussif306cc82013-10-24 08:34:17 -05002198 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002199 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002200 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002201 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002202 out:
2203 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002204 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002205
2206 return len;
2207}
Steven Rostedt659372d2009-09-03 19:11:07 -04002208
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002209int trace_array_vprintk(struct trace_array *tr,
2210 unsigned long ip, const char *fmt, va_list args)
2211{
2212 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2213}
2214
2215int trace_array_printk(struct trace_array *tr,
2216 unsigned long ip, const char *fmt, ...)
2217{
2218 int ret;
2219 va_list ap;
2220
2221 if (!(trace_flags & TRACE_ITER_PRINTK))
2222 return 0;
2223
2224 va_start(ap, fmt);
2225 ret = trace_array_vprintk(tr, ip, fmt, ap);
2226 va_end(ap);
2227 return ret;
2228}
2229
2230int trace_array_printk_buf(struct ring_buffer *buffer,
2231 unsigned long ip, const char *fmt, ...)
2232{
2233 int ret;
2234 va_list ap;
2235
2236 if (!(trace_flags & TRACE_ITER_PRINTK))
2237 return 0;
2238
2239 va_start(ap, fmt);
2240 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2241 va_end(ap);
2242 return ret;
2243}
2244
Steven Rostedt659372d2009-09-03 19:11:07 -04002245int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2246{
Steven Rostedta813a152009-10-09 01:41:35 -04002247 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002248}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002249EXPORT_SYMBOL_GPL(trace_vprintk);
2250
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002251static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002252{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002253 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2254
Steven Rostedt5a90f572008-09-03 17:42:51 -04002255 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002256 if (buf_iter)
2257 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002258}
2259
Ingo Molnare309b412008-05-12 21:20:51 +02002260static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002261peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2262 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002263{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002264 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002265 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002266
Steven Rostedtd7690412008-10-01 00:29:53 -04002267 if (buf_iter)
2268 event = ring_buffer_iter_peek(buf_iter, ts);
2269 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002270 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002271 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002272
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002273 if (event) {
2274 iter->ent_size = ring_buffer_event_length(event);
2275 return ring_buffer_event_data(event);
2276 }
2277 iter->ent_size = 0;
2278 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002279}
Steven Rostedtd7690412008-10-01 00:29:53 -04002280
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002281static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002282__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2283 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002284{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002285 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002286 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002287 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002288 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002289 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002290 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002291 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002292 int cpu;
2293
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002294 /*
2295	 * If we are in a per_cpu trace file, don't bother iterating over
2296	 * all the cpus; peek directly at that cpu.
2297 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002298 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002299 if (ring_buffer_empty_cpu(buffer, cpu_file))
2300 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002301 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002302 if (ent_cpu)
2303 *ent_cpu = cpu_file;
2304
2305 return ent;
2306 }
2307
Steven Rostedtab464282008-05-12 21:21:00 +02002308 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002309
2310 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002311 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002312
Steven Rostedtbc21b472010-03-31 19:49:26 -04002313 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002314
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02002315 /*
2316 * Pick the entry with the smallest timestamp:
2317 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002318 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002319 next = ent;
2320 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002321 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002322 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002323 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002324 }
2325 }
2326
Steven Rostedt12b5da32012-03-27 10:43:28 -04002327 iter->ent_size = next_size;
2328
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002329 if (ent_cpu)
2330 *ent_cpu = next_cpu;
2331
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002332 if (ent_ts)
2333 *ent_ts = next_ts;
2334
Steven Rostedtbc21b472010-03-31 19:49:26 -04002335 if (missing_events)
2336 *missing_events = next_lost;
2337
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002338 return next;
2339}
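/*
 * Worked example (hypothetical timestamps): with two per-cpu buffers
 *
 *	cpu0: ts=100, ts=130
 *	cpu1: ts=110, ts=120
 *
 * successive calls return the entries in global time order
 * 100, 110, 120, 130, since every pass peeks each cpu and picks the
 * smallest timestamp.
 */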
2340
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002341/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002342struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2343 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002344{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002345 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002346}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002347
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002348/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002349void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002350{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002351 iter->ent = __find_next_entry(iter, &iter->cpu,
2352 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002353
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002354 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002355 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002356
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002357 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002358}
2359
Ingo Molnare309b412008-05-12 21:20:51 +02002360static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002361{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002362 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002363 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002364}
2365
Ingo Molnare309b412008-05-12 21:20:51 +02002366static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002367{
2368 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002369 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002370 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002371
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002372 WARN_ON_ONCE(iter->leftover);
2373
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002374 (*pos)++;
2375
2376 /* can't go backwards */
2377 if (iter->idx > i)
2378 return NULL;
2379
2380 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002381 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002382 else
2383 ent = iter;
2384
2385 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002386 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002387
2388 iter->pos = *pos;
2389
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002390 return ent;
2391}
2392
Jason Wessel955b61e2010-08-05 09:22:23 -05002393void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002394{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002395 struct ring_buffer_event *event;
2396 struct ring_buffer_iter *buf_iter;
2397 unsigned long entries = 0;
2398 u64 ts;
2399
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002400 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002401
Steven Rostedt6d158a82012-06-27 20:46:14 -04002402 buf_iter = trace_buffer_iter(iter, cpu);
2403 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002404 return;
2405
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002406 ring_buffer_iter_reset(buf_iter);
2407
2408 /*
2409	 * With the max latency tracers, it can happen that a reset
2410	 * never took place on a cpu. This is evident from the
2411	 * timestamps being before the start of the buffer.
2412 */
2413 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002414 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002415 break;
2416 entries++;
2417 ring_buffer_read(buf_iter, NULL);
2418 }
2419
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002420 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002421}
2422
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002423/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002424 * The current tracer is copied to avoid taking a global lock
2425 * all around.
2426 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002427static void *s_start(struct seq_file *m, loff_t *pos)
2428{
2429 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002430 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002431 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002432 void *p = NULL;
2433 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002434 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002435
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002436 /*
2437	 * Copy the tracer to avoid using a global lock all around.
2438	 * iter->trace is a copy of current_trace; the name pointer can
2439	 * be compared instead of using strcmp(), as iter->trace->name
2440	 * will point to the same string as current_trace->name.
2441 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002442 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002443 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2444 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002445 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002446
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002447#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002448 if (iter->snapshot && iter->trace->use_max_tr)
2449 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002450#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002451
2452 if (!iter->snapshot)
2453 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002454
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002455 if (*pos != iter->pos) {
2456 iter->ent = NULL;
2457 iter->cpu = 0;
2458 iter->idx = -1;
2459
Steven Rostedtae3b5092013-01-23 15:22:59 -05002460 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002461 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002462 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002463 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002464 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002465
Lai Jiangshanac91d852010-03-02 17:54:50 +08002466 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002467 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2468 ;
2469
2470 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002471 /*
2472 * If we overflowed the seq_file before, then we want
2473 * to just reuse the trace_seq buffer again.
2474 */
2475 if (iter->leftover)
2476 p = iter;
2477 else {
2478 l = *pos - 1;
2479 p = s_next(m, p, &l);
2480 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002481 }
2482
Lai Jiangshan4f535962009-05-18 19:35:34 +08002483 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002484 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002485 return p;
2486}
2487
2488static void s_stop(struct seq_file *m, void *p)
2489{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002490 struct trace_iterator *iter = m->private;
2491
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002492#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002493 if (iter->snapshot && iter->trace->use_max_tr)
2494 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002495#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002496
2497 if (!iter->snapshot)
2498 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002499
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002500 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002501 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002502}
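/*
 * Wiring sketch (the table itself and s_show() live further down in
 * this file): s_start/s_next/s_stop pair with a show callback in a
 * seq_operations table, so a read of the trace file drives this
 * iterator:
 *
 *	static const struct seq_operations tracer_seq_ops = {
 *		.start	= s_start,
 *		.next	= s_next,
 *		.stop	= s_stop,
 *		.show	= s_show,
 *	};
 */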
2503
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002504static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002505get_total_entries(struct trace_buffer *buf,
2506 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002507{
2508 unsigned long count;
2509 int cpu;
2510
2511 *total = 0;
2512 *entries = 0;
2513
2514 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002515 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002516 /*
2517 * If this buffer has skipped entries, then we hold all
2518 * entries for the trace and we need to ignore the
2519 * ones before the time stamp.
2520 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002521 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2522 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002523 /* total is the same as the entries */
2524 *total += count;
2525 } else
2526 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002527 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002528 *entries += count;
2529 }
2530}
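/*
 * Worked example with hypothetical numbers: if a CPU currently holds
 * 60 events and the ring buffer reports 40 overruns for it, the loop
 * above adds 60 to *entries and 100 to *total.  If that CPU instead
 * had skipped_entries == 10 (buffer cleared mid-trace), only 50 is
 * added to both counts, since the skipped events predate the reset
 * time stamp.
 */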
2531
Ingo Molnare309b412008-05-12 21:20:51 +02002532static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002533{
Michael Ellermana6168352008-08-20 16:36:11 -07002534 seq_puts(m, "#                  _------=> CPU#            \n");
2535 seq_puts(m, "#                 / _-----=> irqs-off        \n");
2536 seq_puts(m, "#                | / _----=> need-resched    \n");
2537 seq_puts(m, "#                || / _---=> hardirq/softirq \n");
2538 seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
Steven Rostedte6e1e252011-03-09 10:41:56 -05002539 seq_puts(m, "#                |||| /     delay             \n");
2540 seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
2541 seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002542}
2543
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002544static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002545{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002546 unsigned long total;
2547 unsigned long entries;
2548
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002549 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002550 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2551 entries, total, num_online_cpus());
2552 seq_puts(m, "#\n");
2553}
2554
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002555static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002556{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002557 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002558 seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
Michael Ellermana6168352008-08-20 16:36:11 -07002559 seq_puts(m, "#              | |       |          |         |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002560}
2561
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002562static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05002563{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002564 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002565 seq_puts(m, "#                              _-----=> irqs-off\n");
2566 seq_puts(m, "#                             / _----=> need-resched\n");
2567 seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
2568 seq_puts(m, "#                            || / _--=> preempt-depth\n");
2569 seq_puts(m, "#                            ||| /     delay\n");
2570 seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
2571 seq_puts(m, "#              | |       |   ||||       |         |\n");
2572}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002573
Jiri Olsa62b915f2010-04-02 19:01:22 +02002574void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002575print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2576{
2577 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002578 struct trace_buffer *buf = iter->trace_buffer;
2579 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002580 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002581 unsigned long entries;
2582 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002583 const char *name = "preemption";
2584
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002585 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002586
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002587 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002588
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002589 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002590 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002591 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002592 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002593 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002594 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002595 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002596 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002597 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002598 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002599#if defined(CONFIG_PREEMPT_NONE)
2600 "server",
2601#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2602 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002603#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002604 "preempt",
2605#else
2606 "unknown",
2607#endif
2608 /* These are reserved for later use */
2609 0, 0, 0, 0);
2610#ifdef CONFIG_SMP
2611 seq_printf(m, " #P:%d)\n", num_online_cpus());
2612#else
2613 seq_puts(m, ")\n");
2614#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002615 seq_puts(m, "# -----------------\n");
2616 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002617 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002618 data->comm, data->pid,
2619 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002620 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002621 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002622
2623 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002624 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002625 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2626 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002627 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002628 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2629 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002630 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002631 }
2632
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002633 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002634}
2635
Steven Rostedta3097202008-11-07 22:36:02 -05002636static void test_cpu_buff_start(struct trace_iterator *iter)
2637{
2638 struct trace_seq *s = &iter->seq;
2639
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002640 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2641 return;
2642
2643 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2644 return;
2645
Rusty Russell44623442009-01-01 10:12:23 +10302646 if (cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05002647 return;
2648
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002649 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002650 return;
2651
Rusty Russell44623442009-01-01 10:12:23 +10302652 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002653
2654 /* Don't print started cpu buffer for the first entry of the trace */
2655 if (iter->idx > 1)
2656 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2657 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05002658}
2659
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002660static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002661{
Steven Rostedt214023c2008-05-12 21:20:46 +02002662 struct trace_seq *s = &iter->seq;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002663 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002664 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002665 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002666
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002667 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002668
Steven Rostedta3097202008-11-07 22:36:02 -05002669 test_cpu_buff_start(iter);
2670
Steven Rostedtf633cef2008-12-23 23:24:13 -05002671 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002672
2673 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt27d48be2009-03-04 21:57:29 -05002674 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2675 if (!trace_print_lat_context(iter))
2676 goto partial;
2677 } else {
2678 if (!trace_print_context(iter))
2679 goto partial;
2680 }
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002681 }
2682
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002683 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002684 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002685
2686 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2687 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002688
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002689 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002690partial:
2691 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002692}
2693
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002694static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002695{
2696 struct trace_seq *s = &iter->seq;
2697 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002698 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002699
2700 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002701
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002702 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002703 if (!trace_seq_printf(s, "%d %d %llu ",
2704 entry->pid, iter->cpu, iter->ts))
2705 goto partial;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002706 }
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002707
Steven Rostedtf633cef2008-12-23 23:24:13 -05002708 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002709 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002710 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002711
2712 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2713 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002714
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002715 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002716partial:
2717 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002718}
2719
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002720static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002721{
2722 struct trace_seq *s = &iter->seq;
2723 unsigned char newline = '\n';
2724 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002725 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002726
2727 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002728
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002729 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2730 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2731 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2732 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2733 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002734
Steven Rostedtf633cef2008-12-23 23:24:13 -05002735 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002736 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04002737 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002738 if (ret != TRACE_TYPE_HANDLED)
2739 return ret;
2740 }
Steven Rostedt7104f302008-10-01 10:52:51 -04002741
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002742 SEQ_PUT_FIELD_RET(s, newline);
2743
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002744 return TRACE_TYPE_HANDLED;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002745}
2746
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002747static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002748{
2749 struct trace_seq *s = &iter->seq;
2750 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002751 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002752
2753 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002754
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002755 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2756 SEQ_PUT_FIELD_RET(s, entry->pid);
Steven Rostedt1830b522009-02-07 19:38:43 -05002757 SEQ_PUT_FIELD_RET(s, iter->cpu);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002758 SEQ_PUT_FIELD_RET(s, iter->ts);
2759 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002760
Steven Rostedtf633cef2008-12-23 23:24:13 -05002761 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04002762 return event ? event->funcs->binary(iter, 0, event) :
2763 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002764}
2765
Jiri Olsa62b915f2010-04-02 19:01:22 +02002766int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002767{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002768 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002769 int cpu;
2770
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002771 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002772 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002773 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002774 buf_iter = trace_buffer_iter(iter, cpu);
2775 if (buf_iter) {
2776 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002777 return 0;
2778 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002779 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002780 return 0;
2781 }
2782 return 1;
2783 }
2784
Steven Rostedtab464282008-05-12 21:21:00 +02002785 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04002786 buf_iter = trace_buffer_iter(iter, cpu);
2787 if (buf_iter) {
2788 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04002789 return 0;
2790 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002791 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04002792 return 0;
2793 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002794 }
Steven Rostedtd7690412008-10-01 00:29:53 -04002795
Frederic Weisbecker797d3712008-09-30 18:13:45 +02002796 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002797}
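/*
 * Note: a per-cpu buf_iter exists only when the file was opened via
 * __tracing_open() (the "trace" file path); otherwise we fall back to
 * asking the live ring buffer directly (assumed to cover the
 * trace_pipe-style callers, where no iterator is prepared).
 */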
2798
Lai Jiangshan4f535962009-05-18 19:35:34 +08002799/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05002800enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002801{
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002802 enum print_line_t ret;
2803
Jiri Olsaee5e51f2011-03-25 12:05:18 +01002804 if (iter->lost_events &&
2805 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2806 iter->cpu, iter->lost_events))
2807 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002808
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002809 if (iter->trace && iter->trace->print_line) {
2810 ret = iter->trace->print_line(iter);
2811 if (ret != TRACE_TYPE_UNHANDLED)
2812 return ret;
2813 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02002814
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05002815 if (iter->ent->type == TRACE_BPUTS &&
2816 trace_flags & TRACE_ITER_PRINTK &&
2817 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2818 return trace_print_bputs_msg_only(iter);
2819
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002820 if (iter->ent->type == TRACE_BPRINT &&
2821 trace_flags & TRACE_ITER_PRINTK &&
2822 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002823 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002824
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002825 if (iter->ent->type == TRACE_PRINT &&
2826 trace_flags & TRACE_ITER_PRINTK &&
2827 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002828 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002829
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002830 if (trace_flags & TRACE_ITER_BIN)
2831 return print_bin_fmt(iter);
2832
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002833 if (trace_flags & TRACE_ITER_HEX)
2834 return print_hex_fmt(iter);
2835
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002836 if (trace_flags & TRACE_ITER_RAW)
2837 return print_raw_fmt(iter);
2838
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002839 return print_trace_fmt(iter);
2840}
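/*
 * Dispatch order above, highest precedence first: the lost-events
 * notice, the tracer's own print_line() hook, the printk-msg-only
 * shortcuts (bputs, bprint, print), then the bin/hex/raw output
 * options, and finally the default formatted output.
 */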
2841
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002842void trace_latency_header(struct seq_file *m)
2843{
2844 struct trace_iterator *iter = m->private;
2845
2846 /* print nothing if the buffers are empty */
2847 if (trace_empty(iter))
2848 return;
2849
2850 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2851 print_trace_header(m, iter);
2852
2853 if (!(trace_flags & TRACE_ITER_VERBOSE))
2854 print_lat_help_header(m);
2855}
2856
Jiri Olsa62b915f2010-04-02 19:01:22 +02002857void trace_default_header(struct seq_file *m)
2858{
2859 struct trace_iterator *iter = m->private;
2860
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002861 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2862 return;
2863
Jiri Olsa62b915f2010-04-02 19:01:22 +02002864 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2865 /* print nothing if the buffers are empty */
2866 if (trace_empty(iter))
2867 return;
2868 print_trace_header(m, iter);
2869 if (!(trace_flags & TRACE_ITER_VERBOSE))
2870 print_lat_help_header(m);
2871 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002872 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2873 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002874 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002875 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002876 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002877 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02002878 }
2879}
2880
Steven Rostedte0a413f2011-09-29 21:26:16 -04002881static void test_ftrace_alive(struct seq_file *m)
2882{
2883 if (!ftrace_is_dead())
2884 return;
2885 seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2886 seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
2887}
2888
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002889#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002890static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002891{
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002892 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2893 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2894 seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
Wang YanQingb9be6d02013-09-14 12:59:16 +08002895 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002896 seq_printf(m, "#                      (Doesn't have to be '2', works with any number that\n");
2897 seq_printf(m, "#                       is not a '0' or '1')\n");
2898}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002899
2900static void show_snapshot_percpu_help(struct seq_file *m)
2901{
2902 seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2903#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2904 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2905 seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n");
2906#else
2907 seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2908 seq_printf(m, "# Must use main snapshot file to allocate.\n");
2909#endif
2910 seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2911 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n");
2912 seq_printf(m, "# is not a '0' or '1')\n");
2913}
2914
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002915static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2916{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05002917 if (iter->tr->allocated_snapshot)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002918 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2919 else
2920 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2921
2922 seq_printf(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002923 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2924 show_snapshot_main_help(m);
2925 else
2926 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002927}
2928#else
2929/* Should never be called */
2930static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2931#endif
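/*
 * Typical use of the snapshot file from user space, matching the help
 * text above (sketch; the debugfs mount point is an assumption):
 *
 *   echo 1 > /sys/kernel/debug/tracing/snapshot    allocate and take
 *   cat /sys/kernel/debug/tracing/snapshot         read the frozen copy
 *   echo 0 > /sys/kernel/debug/tracing/snapshot    clear and free
 */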
2932
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002933static int s_show(struct seq_file *m, void *v)
2934{
2935 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002936 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002937
2938 if (iter->ent == NULL) {
2939 if (iter->tr) {
2940 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2941 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002942 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002943 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002944 if (iter->snapshot && trace_empty(iter))
2945 print_snapshot_help(m, iter);
2946 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002947 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02002948 else
2949 trace_default_header(m);
2950
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002951 } else if (iter->leftover) {
2952 /*
2953 * If we filled the seq_file buffer earlier, we
2954 * want to just show it now.
2955 */
2956 ret = trace_print_seq(m, &iter->seq);
2957
2958 /* ret should this time be zero, but you never know */
2959 iter->leftover = ret;
2960
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002961 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002962 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002963 ret = trace_print_seq(m, &iter->seq);
2964 /*
2965 * If we overflow the seq_file buffer, then it will
2966 * ask us for this data again at start up.
2967 * Use that instead.
2968 * ret is 0 if seq_file write succeeded.
2969 * -1 otherwise.
2970 */
2971 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002972 }
2973
2974 return 0;
2975}
2976
Oleg Nesterov649e9c702013-07-23 17:25:54 +02002977/*
2978 * Should be used after trace_array_get(), trace_types_lock
2979 * ensures that i_cdev was already initialized.
2980 */
2981static inline int tracing_get_cpu(struct inode *inode)
2982{
2983 if (inode->i_cdev) /* See trace_create_cpu_file() */
2984 return (long)inode->i_cdev - 1;
2985 return RING_BUFFER_ALL_CPUS;
2986}
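/*
 * The encoding side is assumed to live in trace_create_cpu_file()
 * (not shown here), storing the cpu biased by one so that a NULL
 * i_cdev can mean "all CPUs":
 *
 *   inode->i_cdev = (void *)((long)cpu + 1);    -- sketch
 */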
2987
James Morris88e9d342009-09-22 16:43:43 -07002988static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002989 .start = s_start,
2990 .next = s_next,
2991 .stop = s_stop,
2992 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002993};
2994
Ingo Molnare309b412008-05-12 21:20:51 +02002995static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02002996__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002997{
Oleg Nesterov6484c712013-07-23 17:26:10 +02002998 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002999 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02003000 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003001
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003002 if (tracing_disabled)
3003 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02003004
Jiri Olsa50e18b92012-04-25 10:23:39 +02003005 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003006 if (!iter)
3007 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003008
Steven Rostedt6d158a82012-06-27 20:46:14 -04003009 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
3010 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003011 if (!iter->buffer_iter)
3012 goto release;
3013
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003014 /*
3015 * We make a copy of the current tracer to avoid concurrent
3016 * changes on it while we are reading.
3017 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003018 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003019 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003020 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003021 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003022
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003023 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003024
Li Zefan79f55992009-06-15 14:58:26 +08003025 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003026 goto fail;
3027
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003028 iter->tr = tr;
3029
3030#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003031 /* Currently only the top directory has a snapshot */
3032 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003033 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003034 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003035#endif
3036 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003037 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003038 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02003039 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003040 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003041
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003042 /* Notify the tracer early; before we stop tracing. */
3043 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01003044 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003045
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003046 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003047 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003048 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3049
David Sharp8be07092012-11-13 12:18:22 -08003050 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09003051 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08003052 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3053
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003054 /* stop the trace while dumping if we are not opening "snapshot" */
3055 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003056 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003057
Steven Rostedtae3b5092013-01-23 15:22:59 -05003058 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003059 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003060 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003061 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003062 }
3063 ring_buffer_read_prepare_sync();
3064 for_each_tracing_cpu(cpu) {
3065 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003066 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003067 }
3068 } else {
3069 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003070 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003071 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003072 ring_buffer_read_prepare_sync();
3073 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003074 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003075 }
3076
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003077 mutex_unlock(&trace_types_lock);
3078
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003079 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003080
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003081 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003082 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003083 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003084 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003085release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02003086 seq_release_private(inode, file);
3087 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003088}
3089
3090int tracing_open_generic(struct inode *inode, struct file *filp)
3091{
Steven Rostedt60a11772008-05-12 21:20:44 +02003092 if (tracing_disabled)
3093 return -ENODEV;
3094
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003095 filp->private_data = inode->i_private;
3096 return 0;
3097}
3098
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003099bool tracing_is_disabled(void)
3100{
3101 return tracing_disabled;
3102}
3103
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003104/*
3105 * Open and update trace_array ref count.
3106 * Must have the current trace_array passed to it.
3107 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003108static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003109{
3110 struct trace_array *tr = inode->i_private;
3111
3112 if (tracing_disabled)
3113 return -ENODEV;
3114
3115 if (trace_array_get(tr) < 0)
3116 return -ENODEV;
3117
3118 filp->private_data = inode->i_private;
3119
3120 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003121}
3122
Hannes Eder4fd27352009-02-10 19:44:12 +01003123static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003124{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003125 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003126 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003127 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003128 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003129
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003130 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003131 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003132 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003133 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003134
Oleg Nesterov6484c712013-07-23 17:26:10 +02003135 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003136 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003137 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003138
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003139 for_each_tracing_cpu(cpu) {
3140 if (iter->buffer_iter[cpu])
3141 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3142 }
3143
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003144 if (iter->trace && iter->trace->close)
3145 iter->trace->close(iter);
3146
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003147 if (!iter->snapshot)
3148 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003149 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003150
3151 __trace_array_put(tr);
3152
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003153 mutex_unlock(&trace_types_lock);
3154
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003155 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003156 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003157 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003158 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003159 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003160
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003161 return 0;
3162}
3163
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003164static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3165{
3166 struct trace_array *tr = inode->i_private;
3167
3168 trace_array_put(tr);
3169 return 0;
3170}
3171
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003172static int tracing_single_release_tr(struct inode *inode, struct file *file)
3173{
3174 struct trace_array *tr = inode->i_private;
3175
3176 trace_array_put(tr);
3177
3178 return single_release(inode, file);
3179}
3180
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003181static int tracing_open(struct inode *inode, struct file *file)
3182{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003183 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003184 struct trace_iterator *iter;
3185 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003186
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003187 if (trace_array_get(tr) < 0)
3188 return -ENODEV;
3189
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003190 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003191 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3192 int cpu = tracing_get_cpu(inode);
3193
3194 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003195 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003196 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003197 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003198 }
3199
3200 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003201 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003202 if (IS_ERR(iter))
3203 ret = PTR_ERR(iter);
3204 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3205 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3206 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003207
3208 if (ret < 0)
3209 trace_array_put(tr);
3210
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003211 return ret;
3212}
3213
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003214/*
3215 * Some tracers are not suitable for instance buffers.
3216 * A tracer is always available for the global array (toplevel)
3217 * or if it explicitly states that it is.
3218 */
3219static bool
3220trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3221{
3222 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3223}
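/*
 * Example: a tracer registered with .allow_instances set can be
 * selected from an instance's current_tracer file; one without it is
 * only offered on the top-level (TRACE_ARRAY_FL_GLOBAL) array.
 */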
3224
3225/* Find the next tracer that this trace array may use */
3226static struct tracer *
3227get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3228{
3229 while (t && !trace_ok_for_array(t, tr))
3230 t = t->next;
3231
3232 return t;
3233}
3234
Ingo Molnare309b412008-05-12 21:20:51 +02003235static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003236t_next(struct seq_file *m, void *v, loff_t *pos)
3237{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003238 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003239 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003240
3241 (*pos)++;
3242
3243 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003244 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003245
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003246 return t;
3247}
3248
3249static void *t_start(struct seq_file *m, loff_t *pos)
3250{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003251 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003252 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003253 loff_t l = 0;
3254
3255 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003256
3257 t = get_tracer_for_array(tr, trace_types);
3258 for (; t && l < *pos; t = t_next(m, t, &l))
3259 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003260
3261 return t;
3262}
3263
3264static void t_stop(struct seq_file *m, void *p)
3265{
3266 mutex_unlock(&trace_types_lock);
3267}
3268
3269static int t_show(struct seq_file *m, void *v)
3270{
3271 struct tracer *t = v;
3272
3273 if (!t)
3274 return 0;
3275
3276 seq_printf(m, "%s", t->name);
3277 if (t->next)
3278 seq_putc(m, ' ');
3279 else
3280 seq_putc(m, '\n');
3281
3282 return 0;
3283}
3284
James Morris88e9d342009-09-22 16:43:43 -07003285static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003286 .start = t_start,
3287 .next = t_next,
3288 .stop = t_stop,
3289 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003290};
3291
3292static int show_traces_open(struct inode *inode, struct file *file)
3293{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003294 struct trace_array *tr = inode->i_private;
3295 struct seq_file *m;
3296 int ret;
3297
Steven Rostedt60a11772008-05-12 21:20:44 +02003298 if (tracing_disabled)
3299 return -ENODEV;
3300
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003301 ret = seq_open(file, &show_traces_seq_ops);
3302 if (ret)
3303 return ret;
3304
3305 m = file->private_data;
3306 m->private = tr;
3307
3308 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003309}
3310
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003311static ssize_t
3312tracing_write_stub(struct file *filp, const char __user *ubuf,
3313 size_t count, loff_t *ppos)
3314{
3315 return count;
3316}
3317
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003318loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003319{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003320 int ret;
3321
Slava Pestov364829b2010-11-24 15:13:16 -08003322 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003323 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003324 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003325 file->f_pos = ret = 0;
3326
3327 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003328}
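/*
 * Writers never went through seq_file (see tracing_write_stub()
 * above), so seeking a write-only handle simply pins f_pos at zero
 * rather than delegating to seq_lseek().
 */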
3329
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003330static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003331 .open = tracing_open,
3332 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003333 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003334 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003335 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003336};
3337
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003338static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003339 .open = show_traces_open,
3340 .read = seq_read,
3341 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003342 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003343};
3344
Ingo Molnar36dfe922008-05-12 21:20:52 +02003345/*
Ingo Molnar36dfe922008-05-12 21:20:52 +02003346 * The tracer itself will not take this lock, but still we want
3347 * to provide a consistent cpumask to user-space:
3348 */
3349static DEFINE_MUTEX(tracing_cpumask_update_lock);
3350
3351/*
3352 * Temporary storage for the character representation of the
3353 * CPU bitmask (and one more byte for the newline):
3354 */
3355static char mask_str[NR_CPUS + 1];
3356
Ingo Molnarc7078de2008-05-12 21:20:52 +02003357static ssize_t
3358tracing_cpumask_read(struct file *filp, char __user *ubuf,
3359 size_t count, loff_t *ppos)
3360{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003361 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003362 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003363
3364 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003365
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003366 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003367 if (count - len < 2) {
3368 count = -EINVAL;
3369 goto out_err;
3370 }
3371 len += sprintf(mask_str + len, "\n");
3372 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3373
3374out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003375 mutex_unlock(&tracing_cpumask_update_lock);
3376
3377 return count;
3378}
3379
3380static ssize_t
3381tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3382 size_t count, loff_t *ppos)
3383{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003384 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303385 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003386 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303387
3388 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3389 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003390
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303391 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003392 if (err)
3393 goto err_unlock;
3394
Li Zefan215368e2009-06-15 10:56:42 +08003395 mutex_lock(&tracing_cpumask_update_lock);
3396
Steven Rostedta5e25882008-12-02 15:34:05 -05003397 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003398 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02003399 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003400 /*
3401 * Increase/decrease the disabled counter if we are
3402 * about to flip a bit in the cpumask:
3403 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003404 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303405 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003406 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3407 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003408 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003409 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303410 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003411 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3412 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003413 }
3414 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003415 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05003416 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02003417
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003418 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003419
Ingo Molnarc7078de2008-05-12 21:20:52 +02003420 mutex_unlock(&tracing_cpumask_update_lock);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303421 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003422
Ingo Molnarc7078de2008-05-12 21:20:52 +02003423 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003424
3425err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08003426 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003427
3428 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003429}
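/*
 * Example: "echo 3 > tracing_cpumask" restricts tracing to CPUs 0-1.
 * The loop above bumps the disabled counter and stops the per-cpu
 * ring buffer for every CPU leaving the mask, and undoes both for
 * CPUs entering it, under max_lock so a concurrent buffer swap sees
 * a consistent state.
 */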
3430
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003431static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003432 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003433 .read = tracing_cpumask_read,
3434 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003435 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003436 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003437};
3438
Li Zefanfdb372e2009-12-08 11:15:59 +08003439static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003440{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003441 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003442 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003443 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003444 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003445
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003446 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003447 tracer_flags = tr->current_trace->flags->val;
3448 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003449
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003450 for (i = 0; trace_options[i]; i++) {
3451 if (trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003452 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003453 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003454 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003455 }
3456
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003457 for (i = 0; trace_opts[i].name; i++) {
3458 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003459 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003460 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003461 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003462 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003463 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003464
Li Zefanfdb372e2009-12-08 11:15:59 +08003465 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003466}
3467
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003468static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003469 struct tracer_flags *tracer_flags,
3470 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003471{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003472 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003473 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003474
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003475 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003476 if (ret)
3477 return ret;
3478
3479 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003480 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003481 else
Zhaolei77708412009-08-07 18:53:21 +08003482 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003483 return 0;
3484}
3485
Li Zefan8d18eaa2009-12-08 11:17:06 +08003486/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003487static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003488{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003489 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003490 struct tracer_flags *tracer_flags = trace->flags;
3491 struct tracer_opt *opts = NULL;
3492 int i;
3493
3494 for (i = 0; tracer_flags->opts[i].name; i++) {
3495 opts = &tracer_flags->opts[i];
3496
3497 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003498 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003499 }
3500
3501 return -EINVAL;
3502}
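/*
 * Example (assuming the function_graph tracer's option table from
 * trace_functions_graph.c): writing "nofuncgraph-proc" into
 * trace_options reaches this function with cmp == "funcgraph-proc"
 * and neg == 1, clearing that tracer flag via __set_tracer_option().
 */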
3503
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003504/* Some tracers require overwrite to stay enabled */
3505int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3506{
3507 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3508 return -1;
3509
3510 return 0;
3511}
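/*
 * trace_keep_overwrite() is meant to be used from a tracer's
 * ->flag_changed() callback (the latency tracers such as irqsoff are
 * the assumed users), so that clearing TRACE_ITER_OVERWRITE is
 * refused while such a tracer is enabled.
 */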
3512
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003513int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003514{
3515 /* do nothing if flag is already set */
3516 if (!!(trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003517 return 0;
3518
3519 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003520 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05003521 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003522 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003523
3524 if (enabled)
3525 trace_flags |= mask;
3526 else
3527 trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003528
3529 if (mask == TRACE_ITER_RECORD_CMD)
3530 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003531
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003532 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003533 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003534#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003535 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003536#endif
3537 }
Steven Rostedt81698832012-10-11 10:15:05 -04003538
3539 if (mask == TRACE_ITER_PRINTK)
3540 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003541
3542 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003543}
3544
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003545static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003546{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003547 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003548 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003549 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003550 int i;
3551
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003552 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003553
Li Zefan8d18eaa2009-12-08 11:17:06 +08003554 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003555 neg = 1;
3556 cmp += 2;
3557 }
3558
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003559 mutex_lock(&trace_types_lock);
3560
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003561 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003562 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003563 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003564 break;
3565 }
3566 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003567
3568 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003569 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003570 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003571
3572 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003573
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003574 return ret;
3575}
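/*
 * Examples of strings accepted here: "print-parent" and
 * "noprint-parent" for the core flags, or a tracer-specific name
 * handled by set_tracer_option().  The "no" prefix is stripped once,
 * so an option whose name itself starts with "no" cannot be negated
 * this way.
 */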
3576
3577static ssize_t
3578tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3579 size_t cnt, loff_t *ppos)
3580{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003581 struct seq_file *m = filp->private_data;
3582 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003583 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003584 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003585
3586 if (cnt >= sizeof(buf))
3587 return -EINVAL;
3588
3589 if (copy_from_user(&buf, ubuf, cnt))
3590 return -EFAULT;
3591
Steven Rostedta8dd2172013-01-09 20:54:17 -05003592 buf[cnt] = 0;
3593
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003594 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003595 if (ret < 0)
3596 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003597
Jiri Olsacf8517c2009-10-23 19:36:16 -04003598 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003599
3600 return cnt;
3601}
3602
Li Zefanfdb372e2009-12-08 11:15:59 +08003603static int tracing_trace_options_open(struct inode *inode, struct file *file)
3604{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003605 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003606 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003607
Li Zefanfdb372e2009-12-08 11:15:59 +08003608 if (tracing_disabled)
3609 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003610
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003611 if (trace_array_get(tr) < 0)
3612 return -ENODEV;
3613
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003614 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3615 if (ret < 0)
3616 trace_array_put(tr);
3617
3618 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003619}
3620
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003621static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003622 .open = tracing_trace_options_open,
3623 .read = seq_read,
3624 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003625 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003626 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003627};
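/*
 * Minimal user-space sketch of the workflow the mini-HOWTO below
 * documents (the debugfs mount point and file layout are assumptions
 * about the running system):
 *
 *   #include <fcntl.h>
 *   #include <unistd.h>
 *
 *   int main(void)
 *   {
 *           int fd = open("/sys/kernel/debug/tracing/tracing_on",
 *                         O_WRONLY);
 *
 *           if (fd < 0)
 *                   return 1;
 *           write(fd, "1", 1);      re-enable tracing
 *           close(fd);
 *           return 0;
 *   }
 */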
3628
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003629static const char readme_msg[] =
3630 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003631 "# echo 0 > tracing_on : quick way to disable tracing\n"
3632 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3633 " Important files:\n"
3634 " trace\t\t\t- The static contents of the buffer\n"
3635 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3636 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3637 " current_tracer\t- function and latency tracers\n"
3638 " available_tracers\t- list of configured tracers for current_tracer\n"
3639 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3640 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3641 " trace_clock\t\t-change the clock used to order events\n"
3642 " local: Per cpu clock but may not be synced across CPUs\n"
3643 " global: Synced across CPUs but slows tracing down.\n"
3644 " counter: Not a clock, but just an increment\n"
3645 " uptime: Jiffy counter from time of boot\n"
3646 " perf: Same clock that perf events use\n"
3647#ifdef CONFIG_X86_64
3648 " x86-tsc: TSC cycle counter\n"
3649#endif
3650 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3651 " tracing_cpumask\t- Limit which CPUs to trace\n"
3652 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3653 "\t\t\t Remove sub-buffer with rmdir\n"
3654 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003655 "\t\t\t Disable an option by adding the prefix 'no' to the\n"
3656 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003657 " saved_cmdlines_size\t- echo the number of entries to store in the comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003658#ifdef CONFIG_DYNAMIC_FTRACE
3659 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003660 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3661 "\t\t\t functions\n"
3662 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3663 "\t modules: Can select a group via module\n"
3664 "\t Format: :mod:<module-name>\n"
3665 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3666 "\t triggers: a command to perform when function is hit\n"
3667 "\t Format: <function>:<trigger>[:count]\n"
3668 "\t trigger: traceon, traceoff\n"
3669 "\t\t enable_event:<system>:<event>\n"
3670 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003671#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003672 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003673#endif
3674#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003675 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003676#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04003677 "\t\t dump\n"
3678 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003679 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3680 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3681 "\t The first one will disable tracing every time do_fault is hit\n"
3682 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3683 "\t The first time do trap is hit and it disables tracing, the\n"
3684 "\t counter will decrement to 2. If tracing is already disabled,\n"
3685 "\t the counter will not decrement. It only decrements when the\n"
3686 "\t trigger did work\n"
3687 "\t To remove trigger without count:\n"
3688 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3689 "\t To remove trigger with a count:\n"
3690 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003691 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003692 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3693 "\t modules: Can select a group via module command :mod:\n"
3694 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003695#endif /* CONFIG_DYNAMIC_FTRACE */
3696#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003697 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3698 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003699#endif
3700#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3701 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3702 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3703#endif
3704#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003705 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3706 "\t\t\t snapshot buffer. Read the contents for more\n"
3707 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003708#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003709#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003710 " stack_trace\t\t- Shows the max stack trace when active\n"
3711 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003712 "\t\t\t Write into this file to reset the max size (trigger a\n"
3713 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003714#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003715 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3716 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003717#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003718#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003719 " events/\t\t- Directory containing all trace event subsystems:\n"
3720 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3721 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003722 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3723 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003724 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003725 " events/<system>/<event>/\t- Directory containing control files for\n"
3726 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003727 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3728 " filter\t\t- If set, only events passing filter are traced\n"
3729 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003730 "\t Format: <trigger>[:count][if <filter>]\n"
3731 "\t trigger: traceon, traceoff\n"
3732 "\t enable_event:<system>:<event>\n"
3733 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003734#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003735 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003736#endif
3737#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003738 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003739#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003740 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3741 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3742 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3743 "\t events/block/block_unplug/trigger\n"
3744 "\t The first disables tracing every time block_unplug is hit.\n"
3745 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3746 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3747 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3748 "\t Like function triggers, the counter is only decremented if it\n"
3749 "\t enabled or disabled tracing.\n"
3750 "\t To remove a trigger without a count:\n"
3751 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3752 "\t To remove a trigger with a count:\n"
3753 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3754 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003755;
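/*
 * Illustrative userspace sketch of the trigger syntax documented above
 * (same assumed mount point and headers as the sketch following
 * tracing_iter_fops): a trigger is armed by writing
 * "<function>:<trigger>[:count]" and removed by writing the same string
 * prefixed with '!'.
 */
static int write_ftrace_filter(const char *cmd)
{
	int fd = open("/sys/kernel/debug/tracing/set_ftrace_filter", O_WRONLY);
	ssize_t ret;

	if (fd < 0)
		return -1;
	ret = write(fd, cmd, strlen(cmd));
	close(fd);
	return ret < 0 ? -1 : 0;
}

/*
 * e.g. write_ftrace_filter("do_fault:traceoff:3") arms a trigger and
 * write_ftrace_filter("!do_fault:traceoff") removes it, mirroring the
 * echo examples in the readme text above.
 */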
3756
3757static ssize_t
3758tracing_readme_read(struct file *filp, char __user *ubuf,
3759 size_t cnt, loff_t *ppos)
3760{
3761 return simple_read_from_buffer(ubuf, cnt, ppos,
3762 readme_msg, strlen(readme_msg));
3763}
3764
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003765static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003766 .open = tracing_open_generic,
3767 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003768 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003769};
3770
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003771static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003772{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003773 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003774
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003775 if (*pos || m->count)
3776 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003777
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003778 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003779
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003780 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3781 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003782 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003783 continue;
3784
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003785 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003786 }
3787
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003788 return NULL;
3789}
Avadh Patel69abe6a2009-04-10 16:04:48 -04003790
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003791static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3792{
3793 void *v;
3794 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003795
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003796 preempt_disable();
3797 arch_spin_lock(&trace_cmdline_lock);
3798
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003799 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003800 while (l <= *pos) {
3801 v = saved_cmdlines_next(m, v, &l);
3802 if (!v)
3803 return NULL;
3804 }
3805
3806 return v;
3807}
3808
3809static void saved_cmdlines_stop(struct seq_file *m, void *v)
3810{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003811 arch_spin_unlock(&trace_cmdline_lock);
3812 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003813}
3814
3815static int saved_cmdlines_show(struct seq_file *m, void *v)
3816{
3817 char buf[TASK_COMM_LEN];
3818 unsigned int *pid = v;
3819
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003820 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003821 seq_printf(m, "%d %s\n", *pid, buf);
3822 return 0;
3823}
3824
3825static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3826 .start = saved_cmdlines_start,
3827 .next = saved_cmdlines_next,
3828 .stop = saved_cmdlines_stop,
3829 .show = saved_cmdlines_show,
3830};
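/*
 * The seq_operations above follow the standard seq_file contract:
 * ->start() acquires any needed lock and positions the cursor at *pos,
 * ->next() advances the cursor and bumps *pos, ->stop() undoes what
 * ->start() did, and ->show() formats a single element.  A minimal
 * sketch of the same contract over a hypothetical static table
 * (locking elided, illustrative only):
 */
static int demo_vals[] = { 10, 20, 30 };

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	return (size_t)*pos < ARRAY_SIZE(demo_vals) ? &demo_vals[*pos] : NULL;
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return (size_t)*pos < ARRAY_SIZE(demo_vals) ? &demo_vals[*pos] : NULL;
}

static void demo_stop(struct seq_file *m, void *v)
{
}

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", *(int *)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start	= demo_start,
	.next	= demo_next,
	.stop	= demo_stop,
	.show	= demo_show,
};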
3831
3832static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3833{
3834 if (tracing_disabled)
3835 return -ENODEV;
3836
3837 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04003838}
3839
3840static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003841 .open = tracing_saved_cmdlines_open,
3842 .read = seq_read,
3843 .llseek = seq_lseek,
3844 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04003845};
3846
3847static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003848tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3849 size_t cnt, loff_t *ppos)
3850{
3851 char buf[64];
3852 int r;
3853
3854 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003855 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003856 arch_spin_unlock(&trace_cmdline_lock);
3857
3858 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3859}
3860
3861static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3862{
3863 kfree(s->saved_cmdlines);
3864 kfree(s->map_cmdline_to_pid);
3865 kfree(s);
3866}
3867
3868static int tracing_resize_saved_cmdlines(unsigned int val)
3869{
3870 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3871
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003872 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003873 if (!s)
3874 return -ENOMEM;
3875
3876 if (allocate_cmdlines_buffer(val, s) < 0) {
3877 kfree(s);
3878 return -ENOMEM;
3879 }
3880
3881 arch_spin_lock(&trace_cmdline_lock);
3882 savedcmd_temp = savedcmd;
3883 savedcmd = s;
3884 arch_spin_unlock(&trace_cmdline_lock);
3885 free_saved_cmdlines_buffer(savedcmd_temp);
3886
3887 return 0;
3888}
3889
3890static ssize_t
3891tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3892 size_t cnt, loff_t *ppos)
3893{
3894 unsigned long val;
3895 int ret;
3896
3897 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3898 if (ret)
3899 return ret;
3900
3901 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
3902 if (!val || val > PID_MAX_DEFAULT)
3903 return -EINVAL;
3904
3905 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3906 if (ret < 0)
3907 return ret;
3908
3909 *ppos += cnt;
3910
3911 return cnt;
3912}
3913
3914static const struct file_operations tracing_saved_cmdlines_size_fops = {
3915 .open = tracing_open_generic,
3916 .read = tracing_saved_cmdlines_size_read,
3917 .write = tracing_saved_cmdlines_size_write,
3918};
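/*
 * Illustrative userspace sketch (same assumed mount point; also needs
 * <stdio.h> for snprintf()): the map is resized by writing a decimal
 * count, and the handler above rejects 0 and anything above
 * PID_MAX_DEFAULT.
 */
static int resize_saved_cmdlines_map(unsigned int n)
{
	char buf[16];
	int fd, len;

	fd = open("/sys/kernel/debug/tracing/saved_cmdlines_size", O_WRONLY);
	if (fd < 0)
		return -1;
	len = snprintf(buf, sizeof(buf), "%u\n", n);
	if (write(fd, buf, len) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}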
3919
3920static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003921tracing_set_trace_read(struct file *filp, char __user *ubuf,
3922 size_t cnt, loff_t *ppos)
3923{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003924 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08003925 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003926 int r;
3927
3928 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003929 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003930 mutex_unlock(&trace_types_lock);
3931
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003932 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003933}
3934
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003935int tracer_init(struct tracer *t, struct trace_array *tr)
3936{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003937 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003938 return t->init(tr);
3939}
3940
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003941static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003942{
3943 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05003944
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003945 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003946 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003947}
3948
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003949#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09003950/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003951static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3952 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09003953{
3954 int cpu, ret = 0;
3955
3956 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3957 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003958 ret = ring_buffer_resize(trace_buf->buffer,
3959 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003960 if (ret < 0)
3961 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003962 per_cpu_ptr(trace_buf->data, cpu)->entries =
3963 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003964 }
3965 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003966 ret = ring_buffer_resize(trace_buf->buffer,
3967 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003968 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003969 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3970 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003971 }
3972
3973 return ret;
3974}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003975#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09003976
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003977static int __tracing_resize_ring_buffer(struct trace_array *tr,
3978 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04003979{
3980 int ret;
3981
3982 /*
3983 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04003984 * we use the size that was given, and we can forget about
3985 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04003986 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05003987 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04003988
Steven Rostedtb382ede62012-10-10 21:44:34 -04003989 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003990 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04003991 return 0;
3992
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003993 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003994 if (ret < 0)
3995 return ret;
3996
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003997#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003998 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3999 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004000 goto out;
4001
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004002 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004003 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004004 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4005 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004006 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04004007 /*
4008 * AARGH! We are left with different
4009 * size max buffer!!!!
4010 * The max buffer is our "snapshot" buffer.
4011 * When a tracer needs a snapshot (one of the
4012 * latency tracers), it swaps the max buffer
4013 * with the saved snapshot. We succeeded in
4014 * updating the size of the main buffer, but failed to
4015 * update the size of the max buffer. But when we tried
4016 * to reset the main buffer to the original size, we
4017 * failed there too. This is very unlikely to
4018 * happen, but if it does, warn and kill all
4019 * tracing.
4020 */
Steven Rostedt73c51622009-03-11 13:42:01 -04004021 WARN_ON(1);
4022 tracing_disabled = 1;
4023 }
4024 return ret;
4025 }
4026
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004027 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004028 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004029 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004030 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004031
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004032 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004033#endif /* CONFIG_TRACER_MAX_TRACE */
4034
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004035 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004036 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004037 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004038 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04004039
4040 return ret;
4041}
4042
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004043static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4044 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004045{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07004046 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004047
4048 mutex_lock(&trace_types_lock);
4049
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004050 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4051 /* make sure, this cpu is enabled in the mask */
4052 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4053 ret = -EINVAL;
4054 goto out;
4055 }
4056 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004057
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004058 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004059 if (ret < 0)
4060 ret = -ENOMEM;
4061
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004062out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004063 mutex_unlock(&trace_types_lock);
4064
4065 return ret;
4066}
4067
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004068
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004069/**
4070 * tracing_update_buffers - used by tracing facility to expand ring buffers
4071 *
4072 * To save memory when tracing is never used on a system that has it
4073 * configured in, the ring buffers are set to a minimum size. But once
4074 * a user starts to use the tracing facility, they need to grow
4075 * to their default size.
4076 *
4077 * This function is to be called when a tracer is about to be used.
4078 */
4079int tracing_update_buffers(void)
4080{
4081 int ret = 0;
4082
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004083 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004084 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004085 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004086 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004087 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004088
4089 return ret;
4090}
4091
Steven Rostedt577b7852009-02-26 23:43:05 -05004092struct trace_option_dentry;
4093
4094static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004095create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004096
4097static void
4098destroy_trace_option_files(struct trace_option_dentry *topts);
4099
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004100/*
4101 * Used to clear out the tracer before deletion of an instance.
4102 * Must have trace_types_lock held.
4103 */
4104static void tracing_set_nop(struct trace_array *tr)
4105{
4106 if (tr->current_trace == &nop_trace)
4107 return;
4108
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004109 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004110
4111 if (tr->current_trace->reset)
4112 tr->current_trace->reset(tr);
4113
4114 tr->current_trace = &nop_trace;
4115}
4116
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004117static int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004118{
Steven Rostedt577b7852009-02-26 23:43:05 -05004119 static struct trace_option_dentry *topts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004120 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004121#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004122 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004123#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004124 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004125
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004126 mutex_lock(&trace_types_lock);
4127
Steven Rostedt73c51622009-03-11 13:42:01 -04004128 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004129 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004130 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04004131 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01004132 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04004133 ret = 0;
4134 }
4135
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004136 for (t = trace_types; t; t = t->next) {
4137 if (strcmp(t->name, buf) == 0)
4138 break;
4139 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004140 if (!t) {
4141 ret = -EINVAL;
4142 goto out;
4143 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004144 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004145 goto out;
4146
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004147 /* Some tracers are only allowed for the top level buffer */
4148 if (!trace_ok_for_array(t, tr)) {
4149 ret = -EINVAL;
4150 goto out;
4151 }
4152
Steven Rostedt9f029e82008-11-12 15:24:24 -05004153 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004154
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004155 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004156
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004157 if (tr->current_trace->reset)
4158 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05004159
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004160 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004161 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05004162
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004163#ifdef CONFIG_TRACER_MAX_TRACE
4164 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05004165
4166 if (had_max_tr && !t->use_max_tr) {
4167 /*
4168 * We need to make sure that the update_max_tr sees that
4169 * current_trace changed to nop_trace to keep it from
4170 * swapping the buffers after we resize it.
4171 * The update_max_tr is called from interrupts disabled
4172 * so a synchronized_sched() is sufficient.
4173 */
4174 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004175 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004176 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004177#endif
Steven Rostedt (Red Hat)f1b21c92014-01-14 12:33:33 -05004178 /* Currently, only the top instance has options */
4179 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4180 destroy_trace_option_files(topts);
4181 topts = create_trace_option_files(tr, t);
4182 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004183
4184#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004185 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004186 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004187 if (ret < 0)
4188 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004189 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004190#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05004191
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004192 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004193 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004194 if (ret)
4195 goto out;
4196 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004197
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004198 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004199 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05004200 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004201 out:
4202 mutex_unlock(&trace_types_lock);
4203
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004204 return ret;
4205}
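/*
 * Illustrative userspace sketch: tracing_set_tracer() is reached by
 * writing a tracer name into "current_tracer" (same assumed mount point
 * and headers as the earlier sketches); the write handler below strips
 * trailing whitespace such as echo's newline.
 */
static int select_tracer(const char *name)
{
	int fd = open("/sys/kernel/debug/tracing/current_tracer", O_WRONLY);
	ssize_t ret;

	if (fd < 0)
		return -1;
	ret = write(fd, name, strlen(name));	/* e.g. "function" or "nop" */
	close(fd);
	return ret < 0 ? -1 : 0;
}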
4206
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004207static ssize_t
4208tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4209 size_t cnt, loff_t *ppos)
4210{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004211 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004212 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004213 int i;
4214 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004215 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004216
Steven Rostedt60063a62008-10-28 10:44:24 -04004217 ret = cnt;
4218
Li Zefanee6c2c12009-09-18 14:06:47 +08004219 if (cnt > MAX_TRACER_SIZE)
4220 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004221
4222 if (copy_from_user(&buf, ubuf, cnt))
4223 return -EFAULT;
4224
4225 buf[cnt] = 0;
4226
4227 /* strip ending whitespace. */
4228 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4229 buf[i] = 0;
4230
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004231 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004232 if (err)
4233 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004234
Jiri Olsacf8517c2009-10-23 19:36:16 -04004235 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004236
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004237 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004238}
4239
4240static ssize_t
4241tracing_max_lat_read(struct file *filp, char __user *ubuf,
4242 size_t cnt, loff_t *ppos)
4243{
4244 unsigned long *ptr = filp->private_data;
4245 char buf[64];
4246 int r;
4247
Steven Rostedtcffae432008-05-12 21:21:00 +02004248 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004249 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004250 if (r > sizeof(buf))
4251 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004252 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004253}
4254
4255static ssize_t
4256tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4257 size_t cnt, loff_t *ppos)
4258{
Hannes Eder5e398412009-02-10 19:44:34 +01004259 unsigned long *ptr = filp->private_data;
Hannes Eder5e398412009-02-10 19:44:34 +01004260 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004261 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004262
Peter Huewe22fe9b52011-06-07 21:58:27 +02004263 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4264 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004265 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004266
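	/* the value written is in microseconds; store it as nanoseconds */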
4267 *ptr = val * 1000;
4268
4269 return cnt;
4270}
4271
Steven Rostedtb3806b42008-05-12 21:20:46 +02004272static int tracing_open_pipe(struct inode *inode, struct file *filp)
4273{
Oleg Nesterov15544202013-07-23 17:25:57 +02004274 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004275 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004276 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004277
4278 if (tracing_disabled)
4279 return -ENODEV;
4280
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004281 if (trace_array_get(tr) < 0)
4282 return -ENODEV;
4283
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004284 mutex_lock(&trace_types_lock);
4285
Steven Rostedtb3806b42008-05-12 21:20:46 +02004286 /* create a buffer to store the information to pass to userspace */
4287 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004288 if (!iter) {
4289 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004290 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004291 goto out;
4292 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004293
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004294 /*
4295 * We make a copy of the current tracer to avoid concurrent
4296 * changes on it while we are reading.
4297 */
4298 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4299 if (!iter->trace) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004300 ret = -ENOMEM;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004301 goto fail;
4302 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004303 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004304
4305 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4306 ret = -ENOMEM;
4307 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304308 }
4309
Steven Rostedta3097202008-11-07 22:36:02 -05004310 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304311 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004312
Steven Rostedt112f38a72009-06-01 15:16:05 -04004313 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4314 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4315
David Sharp8be07092012-11-13 12:18:22 -08004316 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004317 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004318 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4319
Oleg Nesterov15544202013-07-23 17:25:57 +02004320 iter->tr = tr;
4321 iter->trace_buffer = &tr->trace_buffer;
4322 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004323 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004324 filp->private_data = iter;
4325
Steven Rostedt107bad82008-05-12 21:21:01 +02004326 if (iter->trace->pipe_open)
4327 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004328
Arnd Bergmannb4447862010-07-07 23:40:11 +02004329 nonseekable_open(inode, filp);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004330out:
4331 mutex_unlock(&trace_types_lock);
4332 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004333
4334fail:
4335 kfree(iter->trace);
4336 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004337 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004338 mutex_unlock(&trace_types_lock);
4339 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004340}
4341
4342static int tracing_release_pipe(struct inode *inode, struct file *file)
4343{
4344 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02004345 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004346
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004347 mutex_lock(&trace_types_lock);
4348
Steven Rostedt29bf4a52009-12-09 12:37:43 -05004349 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05004350 iter->trace->pipe_close(iter);
4351
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004352 mutex_unlock(&trace_types_lock);
4353
Rusty Russell44623442009-01-01 10:12:23 +10304354 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004355 mutex_destroy(&iter->mutex);
4356 kfree(iter->trace);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004357 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004358
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004359 trace_array_put(tr);
4360
Steven Rostedtb3806b42008-05-12 21:20:46 +02004361 return 0;
4362}
4363
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004364static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004365trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004366{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004367 /* Iterators are static, they should be filled or empty */
4368 if (trace_buffer_iter(iter, iter->cpu_file))
4369 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004370
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004371 if (trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004372 /*
4373 * Always select as readable when in blocking mode
4374 */
4375 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004376 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004377 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004378 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004379}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004380
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004381static unsigned int
4382tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4383{
4384 struct trace_iterator *iter = filp->private_data;
4385
4386 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004387}
4388
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004389/* Must be called with trace_types_lock mutex held. */
4390static int tracing_wait_pipe(struct file *filp)
4391{
4392 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004393 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004394
4395 while (trace_empty(iter)) {
4396
4397 if ((filp->f_flags & O_NONBLOCK)) {
4398 return -EAGAIN;
4399 }
4400
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004401 /*
Liu Bo250bfd32013-01-14 10:54:11 +08004402 * We block until we read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004403 * We still block if tracing is disabled, but we have never
4404 * read anything. This allows a user to cat this file, and
4405 * then enable tracing. But after we have read something,
4406 * we give an EOF when tracing is again disabled.
4407 *
4408 * iter->pos will be 0 if we haven't read anything.
4409 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04004410 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004411 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004412
4413 mutex_unlock(&iter->mutex);
4414
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004415 ret = wait_on_pipe(iter);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004416
4417 mutex_lock(&iter->mutex);
4418
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004419 if (ret)
4420 return ret;
4421
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004422 if (signal_pending(current))
4423 return -EINTR;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004424 }
4425
4426 return 1;
4427}
4428
Steven Rostedtb3806b42008-05-12 21:20:46 +02004429/*
4430 * Consumer reader.
4431 */
4432static ssize_t
4433tracing_read_pipe(struct file *filp, char __user *ubuf,
4434 size_t cnt, loff_t *ppos)
4435{
4436 struct trace_iterator *iter = filp->private_data;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004437 struct trace_array *tr = iter->tr;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004438 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004439
4440 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004441 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4442 if (sret != -EBUSY)
4443 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004444
Steven Rostedtf9520752009-03-02 14:04:40 -05004445 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004446
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004447 /* copy the tracer to avoid using a global lock all around */
Steven Rostedt107bad82008-05-12 21:21:01 +02004448 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004449 if (unlikely(iter->trace->name != tr->current_trace->name))
4450 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004451 mutex_unlock(&trace_types_lock);
4452
4453 /*
4454 * Avoid more than one consumer on a single file descriptor
4455 * This is just a matter of traces coherency, the ring buffer itself
4456 * is protected.
4457 */
4458 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004459 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004460 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4461 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02004462 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02004463 }
4464
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004465waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004466 sret = tracing_wait_pipe(filp);
4467 if (sret <= 0)
4468 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004469
4470 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004471 if (trace_empty(iter)) {
4472 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02004473 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004474 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004475
4476 if (cnt >= PAGE_SIZE)
4477 cnt = PAGE_SIZE - 1;
4478
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004479 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004480 memset(&iter->seq, 0,
4481 sizeof(struct trace_iterator) -
4482 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04004483 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02004484 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004485
Lai Jiangshan4f535962009-05-18 19:35:34 +08004486 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004487 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05004488 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004489 enum print_line_t ret;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004490 int len = iter->seq.len;
4491
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004492 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004493 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02004494 /* don't print partial lines */
4495 iter->seq.len = len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004496 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004497 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01004498 if (ret != TRACE_TYPE_NO_CONSUME)
4499 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004500
4501 if (iter->seq.len >= cnt)
4502 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01004503
4504 /*
4505 * Setting the full flag means we reached the trace_seq buffer
4506 * size and should have exited via the partial-line condition above.
4507 * One of the trace_seq_* functions is not used properly.
4508 */
4509 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4510 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004511 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004512 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004513 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02004514
Steven Rostedtb3806b42008-05-12 21:20:46 +02004515 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004516 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4517 if (iter->seq.readpos >= iter->seq.len)
Steven Rostedtf9520752009-03-02 14:04:40 -05004518 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004519
4520 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004521 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004522 * entries, go back to wait for more entries.
4523 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004524 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004525 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004526
Steven Rostedt107bad82008-05-12 21:21:01 +02004527out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004528 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004529
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004530 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004531}
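/*
 * Illustrative userspace sketch of a consuming reader (assumed path and
 * headers as above, plus <stdio.h>): reads block until data arrives
 * unless O_NONBLOCK is set, and every event read here is consumed from
 * the ring buffer.
 */
static int drain_trace_pipe(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);

	if (fd < 0)
		return -1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* forward events as they arrive */
	close(fd);
	return n < 0 ? -1 : 0;
}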
4532
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004533static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4534 unsigned int idx)
4535{
4536 __free_page(spd->pages[idx]);
4537}
4538
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08004539static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004540 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004541 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05004542 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004543 .steal = generic_pipe_buf_steal,
4544 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004545};
4546
Steven Rostedt34cd4992009-02-09 12:06:29 -05004547static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004548tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004549{
4550 size_t count;
4551 int ret;
4552
4553 /* Seq buffer is page-sized, exactly what we need. */
4554 for (;;) {
4555 count = iter->seq.len;
4556 ret = print_trace_line(iter);
4557 count = iter->seq.len - count;
4558 if (rem < count) {
4559 rem = 0;
4560 iter->seq.len -= count;
4561 break;
4562 }
4563 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4564 iter->seq.len -= count;
4565 break;
4566 }
4567
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08004568 if (ret != TRACE_TYPE_NO_CONSUME)
4569 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05004570 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05004571 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004572 rem = 0;
4573 iter->ent = NULL;
4574 break;
4575 }
4576 }
4577
4578 return rem;
4579}
4580
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004581static ssize_t tracing_splice_read_pipe(struct file *filp,
4582 loff_t *ppos,
4583 struct pipe_inode_info *pipe,
4584 size_t len,
4585 unsigned int flags)
4586{
Jens Axboe35f3d142010-05-20 10:43:18 +02004587 struct page *pages_def[PIPE_DEF_BUFFERS];
4588 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004589 struct trace_iterator *iter = filp->private_data;
4590 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02004591 .pages = pages_def,
4592 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004593 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02004594 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004595 .flags = flags,
4596 .ops = &tracing_pipe_buf_ops,
4597 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004598 };
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004599 struct trace_array *tr = iter->tr;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004600 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004601 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004602 unsigned int i;
4603
Jens Axboe35f3d142010-05-20 10:43:18 +02004604 if (splice_grow_spd(pipe, &spd))
4605 return -ENOMEM;
4606
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004607 /* copy the tracer to avoid using a global lock all around */
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004608 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004609 if (unlikely(iter->trace->name != tr->current_trace->name))
4610 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004611 mutex_unlock(&trace_types_lock);
4612
4613 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004614
4615 if (iter->trace->splice_read) {
4616 ret = iter->trace->splice_read(iter, filp,
4617 ppos, pipe, len, flags);
4618 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004619 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004620 }
4621
4622 ret = tracing_wait_pipe(filp);
4623 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004624 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004625
Jason Wessel955b61e2010-08-05 09:22:23 -05004626 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004627 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004628 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004629 }
4630
Lai Jiangshan4f535962009-05-18 19:35:34 +08004631 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004632 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004633
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004634 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04004635 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004636 spd.pages[i] = alloc_page(GFP_KERNEL);
4637 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05004638 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004639
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004640 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004641
4642 /* Copy the data into the page, so we can start over. */
4643 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02004644 page_address(spd.pages[i]),
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004645 iter->seq.len);
4646 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004647 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004648 break;
4649 }
Jens Axboe35f3d142010-05-20 10:43:18 +02004650 spd.partial[i].offset = 0;
4651 spd.partial[i].len = iter->seq.len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004652
Steven Rostedtf9520752009-03-02 14:04:40 -05004653 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004654 }
4655
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004656 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004657 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004658 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004659
4660 spd.nr_pages = i;
4661
Jens Axboe35f3d142010-05-20 10:43:18 +02004662 ret = splice_to_pipe(pipe, &spd);
4663out:
Eric Dumazet047fe362012-06-12 15:24:40 +02004664 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02004665 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004666
Steven Rostedt34cd4992009-02-09 12:06:29 -05004667out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004668 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02004669 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004670}
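/*
 * Illustrative userspace sketch of driving the splice path above
 * (assumes _GNU_SOURCE plus <fcntl.h> and <unistd.h>): splice(2)
 * requires a pipe on one side, so events move trace_pipe -> pipe ->
 * output file without passing through a userspace buffer.
 */
static int splice_trace_once(int trace_fd, int out_fd)
{
	int pfd[2];
	ssize_t n;

	if (pipe(pfd) < 0)
		return -1;
	n = splice(trace_fd, NULL, pfd[1], NULL, 4096, SPLICE_F_MOVE);
	if (n > 0)
		n = splice(pfd[0], NULL, out_fd, NULL, n, SPLICE_F_MOVE);
	close(pfd[0]);
	close(pfd[1]);
	return n < 0 ? -1 : 0;
}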
4671
Steven Rostedta98a3c32008-05-12 21:20:59 +02004672static ssize_t
4673tracing_entries_read(struct file *filp, char __user *ubuf,
4674 size_t cnt, loff_t *ppos)
4675{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004676 struct inode *inode = file_inode(filp);
4677 struct trace_array *tr = inode->i_private;
4678 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004679 char buf[64];
4680 int r = 0;
4681 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004682
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004683 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004684
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004685 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004686 int cpu, buf_size_same;
4687 unsigned long size;
4688
4689 size = 0;
4690 buf_size_same = 1;
4691 /* check if all cpu sizes are same */
4692 for_each_tracing_cpu(cpu) {
4693 /* fill in the size from first enabled cpu */
4694 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004695 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4696 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004697 buf_size_same = 0;
4698 break;
4699 }
4700 }
4701
4702 if (buf_size_same) {
4703 if (!ring_buffer_expanded)
4704 r = sprintf(buf, "%lu (expanded: %lu)\n",
4705 size >> 10,
4706 trace_buf_size >> 10);
4707 else
4708 r = sprintf(buf, "%lu\n", size >> 10);
4709 } else
4710 r = sprintf(buf, "X\n");
4711 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004712 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004713
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004714 mutex_unlock(&trace_types_lock);
4715
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004716 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4717 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004718}
4719
4720static ssize_t
4721tracing_entries_write(struct file *filp, const char __user *ubuf,
4722 size_t cnt, loff_t *ppos)
4723{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004724 struct inode *inode = file_inode(filp);
4725 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004726 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004727 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004728
Peter Huewe22fe9b52011-06-07 21:58:27 +02004729 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4730 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004731 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004732
4733 /* must have at least 1 entry */
4734 if (!val)
4735 return -EINVAL;
4736
Steven Rostedt1696b2b2008-11-13 00:09:35 -05004737 /* value is in KB */
4738 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004739 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004740 if (ret < 0)
4741 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004742
Jiri Olsacf8517c2009-10-23 19:36:16 -04004743 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004744
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004745 return cnt;
4746}
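/*
 * Illustrative userspace sketch (same assumed mount point and headers,
 * plus <stdio.h>): the value written is interpreted in KB, and writing
 * the top-level buffer_size_kb resizes every per-cpu buffer at once
 * (RING_BUFFER_ALL_CPUS above); the per_cpu/cpuN/buffer_size_kb files
 * resize a single CPU's buffer.
 */
static int set_buffer_size_kb(unsigned long kb)
{
	char buf[32];
	int fd, len;

	fd = open("/sys/kernel/debug/tracing/buffer_size_kb", O_WRONLY);
	if (fd < 0)
		return -1;
	len = snprintf(buf, sizeof(buf), "%lu\n", kb);
	if (write(fd, buf, len) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}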
Steven Rostedtbf5e6512008-11-10 21:46:00 -05004747
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004748static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004749tracing_total_entries_read(struct file *filp, char __user *ubuf,
4750 size_t cnt, loff_t *ppos)
4751{
4752 struct trace_array *tr = filp->private_data;
4753 char buf[64];
4754 int r, cpu;
4755 unsigned long size = 0, expanded_size = 0;
4756
4757 mutex_lock(&trace_types_lock);
4758 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004759 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004760 if (!ring_buffer_expanded)
4761 expanded_size += trace_buf_size >> 10;
4762 }
4763 if (ring_buffer_expanded)
4764 r = sprintf(buf, "%lu\n", size);
4765 else
4766 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4767 mutex_unlock(&trace_types_lock);
4768
4769 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4770}
4771
4772static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004773tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4774 size_t cnt, loff_t *ppos)
4775{
4776 /*
4777 * There is no need to read what the user has written, this function
4778 * is just to make sure that there is no error when "echo" is used
4779 */
4780
4781 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004782
4783 return cnt;
4784}
4785
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004786static int
4787tracing_free_buffer_release(struct inode *inode, struct file *filp)
4788{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004789 struct trace_array *tr = inode->i_private;
4790
Steven Rostedtcf30cf62011-06-14 22:44:07 -04004791 /* disable tracing ? */
4792 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07004793 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004794 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004795 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004796
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004797 trace_array_put(tr);
4798
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004799 return 0;
4800}
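/*
 * A sketch of the "free_buffer" protocol implemented above: the write
 * handler only consumes the data, and the real work happens on
 * release, so the buffers are freed when the file is closed, e.g.:
 *
 *   # echo > /sys/kernel/debug/tracing/free_buffer
 *
 * shrinks every per-cpu ring buffer to zero (and, if the
 * TRACE_ITER_STOP_ON_FREE option is set, turns tracing off first).
 */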
4801
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004802static ssize_t
4803tracing_mark_write(struct file *filp, const char __user *ubuf,
4804 size_t cnt, loff_t *fpos)
4805{
Steven Rostedtd696b582011-09-22 11:50:27 -04004806 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07004807 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04004808 struct ring_buffer_event *event;
4809 struct ring_buffer *buffer;
4810 struct print_entry *entry;
4811 unsigned long irq_flags;
4812 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004813 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04004814 int nr_pages = 1;
4815 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04004816 int offset;
4817 int size;
4818 int len;
4819 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004820 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004821
Steven Rostedtc76f0692008-11-07 22:36:02 -05004822 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004823 return -EINVAL;
4824
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07004825 if (!(trace_flags & TRACE_ITER_MARKERS))
4826 return -EINVAL;
4827
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004828 if (cnt > TRACE_BUF_SIZE)
4829 cnt = TRACE_BUF_SIZE;
4830
Steven Rostedtd696b582011-09-22 11:50:27 -04004831 /*
4832 * Userspace is injecting traces into the kernel trace buffer.
4833 * We want to be as non-intrusive as possible.
4834 * To do so, we do not want to allocate any special buffers
4835 * or take any locks, but instead write the userspace data
4836 * straight into the ring buffer.
4837 *
4838 * First we need to pin the userspace buffer into memory,
4839 * which it most likely is, because the caller just referenced it.
4840 * But there's no guarantee that it is. By using get_user_pages_fast()
4841 * and kmap_atomic/kunmap_atomic() we can get access to the
4842 * pages directly. We then write the data directly into the
4843 * ring buffer.
4844 */
4845 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004846
Steven Rostedtd696b582011-09-22 11:50:27 -04004847 /* check if we cross pages */
4848 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4849 nr_pages = 2;
4850
4851 offset = addr & (PAGE_SIZE - 1);
4852 addr &= PAGE_MASK;
4853
4854 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4855 if (ret < nr_pages) {
4856 while (--ret >= 0)
4857 put_page(pages[ret]);
4858 written = -EFAULT;
4859 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004860 }
4861
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004862 for (i = 0; i < nr_pages; i++)
4863 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04004864
4865 local_save_flags(irq_flags);
4866 size = sizeof(*entry) + cnt + 2; /* room for possible '\n' plus '\0' */
Alexander Z Lam2d716192013-07-01 15:31:24 -07004867 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04004868 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4869 irq_flags, preempt_count());
4870 if (!event) {
4871 /* Ring buffer disabled, return as if not open for write */
4872 written = -EBADF;
4873 goto out_unlock;
4874 }
4875
4876 entry = ring_buffer_event_data(event);
4877 entry->ip = _THIS_IP_;
4878
4879 if (nr_pages == 2) {
4880 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004881 memcpy(&entry->buf, map_page[0] + offset, len);
4882 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04004883 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004884 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04004885
4886 if (entry->buf[cnt - 1] != '\n') {
4887 entry->buf[cnt] = '\n';
4888 entry->buf[cnt + 1] = '\0';
4889 } else
4890 entry->buf[cnt] = '\0';
4891
Steven Rostedt7ffbd482012-10-11 12:14:25 -04004892 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04004893
4894 written = cnt;
4895
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004896 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004897
Steven Rostedtd696b582011-09-22 11:50:27 -04004898 out_unlock:
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004899 for (i = 0; i < nr_pages; i++) {
4900 kunmap_atomic(map_page[i]);
4901 put_page(pages[i]);
4902 }
Steven Rostedtd696b582011-09-22 11:50:27 -04004903 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004904 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004905}
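/*
 * A minimal userspace sketch of driving tracing_mark_write() through
 * the "trace_marker" file (the path is the conventional mount point,
 * not something this file dictates; error handling elided):
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
 *
 *	if (fd >= 0)
 *		write(fd, "hello from userspace", 20);
 *
 * Because the handler pins at most two pages, a marker write is
 * limited to TRACE_BUF_SIZE bytes and can span at most one page
 * boundary; get_user_pages_fast() plus kmap_atomic() lets it copy
 * straight into the ring buffer without any intermediate allocation.
 */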
4906
Li Zefan13f16d22009-12-08 11:16:11 +08004907static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08004908{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004909 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004910 int i;
4911
4912 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08004913 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08004914 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004915 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4916 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08004917 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08004918
Li Zefan13f16d22009-12-08 11:16:11 +08004919 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08004920}
4921
Steven Rostedte1e232c2014-02-10 23:38:46 -05004922static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08004923{
Zhaolei5079f322009-08-25 16:12:56 +08004924 int i;
4925
Zhaolei5079f322009-08-25 16:12:56 +08004926 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4927 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4928 break;
4929 }
4930 if (i == ARRAY_SIZE(trace_clocks))
4931 return -EINVAL;
4932
Zhaolei5079f322009-08-25 16:12:56 +08004933 mutex_lock(&trace_types_lock);
4934
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004935 tr->clock_id = i;
4936
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004937 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08004938
David Sharp60303ed2012-10-11 16:27:52 -07004939 /*
4940 * New clock may not be consistent with the previous clock.
4941 * Reset the buffer so that it doesn't have incomparable timestamps.
4942 */
Alexander Z Lam94571582013-08-02 18:36:16 -07004943 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004944
4945#ifdef CONFIG_TRACER_MAX_TRACE
4946 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4947 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07004948 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004949#endif
David Sharp60303ed2012-10-11 16:27:52 -07004950
Zhaolei5079f322009-08-25 16:12:56 +08004951 mutex_unlock(&trace_types_lock);
4952
Steven Rostedte1e232c2014-02-10 23:38:46 -05004953 return 0;
4954}
4955
4956static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4957 size_t cnt, loff_t *fpos)
4958{
4959 struct seq_file *m = filp->private_data;
4960 struct trace_array *tr = m->private;
4961 char buf[64];
4962 const char *clockstr;
4963 int ret;
4964
4965 if (cnt >= sizeof(buf))
4966 return -EINVAL;
4967
4968 if (copy_from_user(&buf, ubuf, cnt))
4969 return -EFAULT;
4970
4971 buf[cnt] = 0;
4972
4973 clockstr = strstrip(buf);
4974
4975 ret = tracing_set_clock(tr, clockstr);
4976 if (ret)
4977 return ret;
4978
Zhaolei5079f322009-08-25 16:12:56 +08004979 *fpos += cnt;
4980
4981 return cnt;
4982}
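/*
 * Example of the "trace_clock" interface implemented above, with
 * illustrative clock names (the authoritative list is trace_clocks[];
 * the current clock is shown in brackets):
 *
 *   # cat /sys/kernel/debug/tracing/trace_clock
 *   [local] global counter uptime perf
 *   # echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * Switching clocks resets the buffers, since timestamps taken with
 * different clocks are not comparable.
 */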
4983
Li Zefan13f16d22009-12-08 11:16:11 +08004984static int tracing_clock_open(struct inode *inode, struct file *file)
4985{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004986 struct trace_array *tr = inode->i_private;
4987 int ret;
4988
Li Zefan13f16d22009-12-08 11:16:11 +08004989 if (tracing_disabled)
4990 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004991
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004992 if (trace_array_get(tr))
4993 return -ENODEV;
4994
4995 ret = single_open(file, tracing_clock_show, inode->i_private);
4996 if (ret < 0)
4997 trace_array_put(tr);
4998
4999 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08005000}
5001
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005002struct ftrace_buffer_info {
5003 struct trace_iterator iter;
5004 void *spare;
5005 unsigned int read;
5006};
5007
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005008#ifdef CONFIG_TRACER_SNAPSHOT
5009static int tracing_snapshot_open(struct inode *inode, struct file *file)
5010{
Oleg Nesterov6484c712013-07-23 17:26:10 +02005011 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005012 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005013 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005014 int ret = 0;
5015
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005016 if (trace_array_get(tr) < 0)
5017 return -ENODEV;
5018
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005019 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02005020 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005021 if (IS_ERR(iter))
5022 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005023 } else {
5024 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005025 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005026 m = kzalloc(sizeof(*m), GFP_KERNEL);
5027 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005028 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005029 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5030 if (!iter) {
5031 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005032 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005033 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005034 ret = 0;
5035
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005036 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02005037 iter->trace_buffer = &tr->max_buffer;
5038 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005039 m->private = iter;
5040 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005041 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005042out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005043 if (ret < 0)
5044 trace_array_put(tr);
5045
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005046 return ret;
5047}
5048
5049static ssize_t
5050tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5051 loff_t *ppos)
5052{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005053 struct seq_file *m = filp->private_data;
5054 struct trace_iterator *iter = m->private;
5055 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005056 unsigned long val;
5057 int ret;
5058
5059 ret = tracing_update_buffers();
5060 if (ret < 0)
5061 return ret;
5062
5063 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5064 if (ret)
5065 return ret;
5066
5067 mutex_lock(&trace_types_lock);
5068
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005069 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005070 ret = -EBUSY;
5071 goto out;
5072 }
5073
5074 switch (val) {
5075 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005076 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5077 ret = -EINVAL;
5078 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005079 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005080 if (tr->allocated_snapshot)
5081 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005082 break;
5083 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005084/* Only allow per-cpu swap if the ring buffer supports it */
5085#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5086 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5087 ret = -EINVAL;
5088 break;
5089 }
5090#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005091 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005092 ret = alloc_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005093 if (ret < 0)
5094 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005095 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005096 local_irq_disable();
5097 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005098 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005099 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005100 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005101 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005102 local_irq_enable();
5103 break;
5104 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005105 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005106 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5107 tracing_reset_online_cpus(&tr->max_buffer);
5108 else
5109 tracing_reset(&tr->max_buffer, iter->cpu_file);
5110 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005111 break;
5112 }
5113
5114 if (ret >= 0) {
5115 *ppos += cnt;
5116 ret = cnt;
5117 }
5118out:
5119 mutex_unlock(&trace_types_lock);
5120 return ret;
5121}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005122
5123static int tracing_snapshot_release(struct inode *inode, struct file *file)
5124{
5125 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005126 int ret;
5127
5128 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005129
5130 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005131 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005132
5133 /* If write only, the seq_file is just a stub */
5134 if (m)
5135 kfree(m->private);
5136 kfree(m);
5137
5138 return 0;
5139}
5140
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005141static int tracing_buffers_open(struct inode *inode, struct file *filp);
5142static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5143 size_t count, loff_t *ppos);
5144static int tracing_buffers_release(struct inode *inode, struct file *file);
5145static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5146 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5147
5148static int snapshot_raw_open(struct inode *inode, struct file *filp)
5149{
5150 struct ftrace_buffer_info *info;
5151 int ret;
5152
5153 ret = tracing_buffers_open(inode, filp);
5154 if (ret < 0)
5155 return ret;
5156
5157 info = filp->private_data;
5158
5159 if (info->iter.trace->use_max_tr) {
5160 tracing_buffers_release(inode, filp);
5161 return -EBUSY;
5162 }
5163
5164 info->iter.snapshot = true;
5165 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5166
5167 return ret;
5168}
5169
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005170#endif /* CONFIG_TRACER_SNAPSHOT */
5171
5172
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005173static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005174 .open = tracing_open_generic,
5175 .read = tracing_max_lat_read,
5176 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005177 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005178};
5179
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005180static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005181 .open = tracing_open_generic,
5182 .read = tracing_set_trace_read,
5183 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005184 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005185};
5186
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005187static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005188 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005189 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005190 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005191 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005192 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005193 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02005194};
5195
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005196static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005197 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005198 .read = tracing_entries_read,
5199 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005200 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005201 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005202};
5203
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005204static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005205 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005206 .read = tracing_total_entries_read,
5207 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005208 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005209};
5210
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005211static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005212 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005213 .write = tracing_free_buffer_write,
5214 .release = tracing_free_buffer_release,
5215};
5216
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005217static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005218 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005219 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005220 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005221 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005222};
5223
Zhaolei5079f322009-08-25 16:12:56 +08005224static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08005225 .open = tracing_clock_open,
5226 .read = seq_read,
5227 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005228 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08005229 .write = tracing_clock_write,
5230};
5231
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005232#ifdef CONFIG_TRACER_SNAPSHOT
5233static const struct file_operations snapshot_fops = {
5234 .open = tracing_snapshot_open,
5235 .read = seq_read,
5236 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005237 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005238 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005239};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005240
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005241static const struct file_operations snapshot_raw_fops = {
5242 .open = snapshot_raw_open,
5243 .read = tracing_buffers_read,
5244 .release = tracing_buffers_release,
5245 .splice_read = tracing_buffers_splice_read,
5246 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005247};
5248
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005249#endif /* CONFIG_TRACER_SNAPSHOT */
5250
Steven Rostedt2cadf912008-12-01 22:20:19 -05005251static int tracing_buffers_open(struct inode *inode, struct file *filp)
5252{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005253 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005254 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005255 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005256
5257 if (tracing_disabled)
5258 return -ENODEV;
5259
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005260 if (trace_array_get(tr) < 0)
5261 return -ENODEV;
5262
Steven Rostedt2cadf912008-12-01 22:20:19 -05005263 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005264 if (!info) {
5265 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005266 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005267 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005268
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005269 mutex_lock(&trace_types_lock);
5270
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005271 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005272 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05005273 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005274 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005275 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005276 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005277 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005278
5279 filp->private_data = info;
5280
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005281 mutex_unlock(&trace_types_lock);
5282
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005283 ret = nonseekable_open(inode, filp);
5284 if (ret < 0)
5285 trace_array_put(tr);
5286
5287 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005288}
5289
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005290static unsigned int
5291tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5292{
5293 struct ftrace_buffer_info *info = filp->private_data;
5294 struct trace_iterator *iter = &info->iter;
5295
5296 return trace_poll(iter, filp, poll_table);
5297}
5298
Steven Rostedt2cadf912008-12-01 22:20:19 -05005299static ssize_t
5300tracing_buffers_read(struct file *filp, char __user *ubuf,
5301 size_t count, loff_t *ppos)
5302{
5303 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005304 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005305 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005306 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005307
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005308 if (!count)
5309 return 0;
5310
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005311 mutex_lock(&trace_types_lock);
5312
5313#ifdef CONFIG_TRACER_MAX_TRACE
5314 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5315 size = -EBUSY;
5316 goto out_unlock;
5317 }
5318#endif
5319
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005320 if (!info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005321 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5322 iter->cpu_file);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005323 size = -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005324 if (!info->spare)
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005325 goto out_unlock;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005326
Steven Rostedt2cadf912008-12-01 22:20:19 -05005327 /* Do we have previous read data to read? */
5328 if (info->read < PAGE_SIZE)
5329 goto read;
5330
Steven Rostedtb6273442013-02-28 13:44:11 -05005331 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005332 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005333 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005334 &info->spare,
5335 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005336 iter->cpu_file, 0);
5337 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05005338
5339 if (ret < 0) {
5340 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005341 if ((filp->f_flags & O_NONBLOCK)) {
5342 size = -EAGAIN;
5343 goto out_unlock;
5344 }
5345 mutex_unlock(&trace_types_lock);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005346 ret = wait_on_pipe(iter);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005347 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005348 if (ret) {
5349 size = ret;
5350 goto out_unlock;
5351 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005352 if (signal_pending(current)) {
5353 size = -EINTR;
5354 goto out_unlock;
5355 }
Steven Rostedtb6273442013-02-28 13:44:11 -05005356 goto again;
5357 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005358 size = 0;
5359 goto out_unlock;
Steven Rostedtb6273442013-02-28 13:44:11 -05005360 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005361
Steven Rostedt436fc282011-10-14 10:44:25 -04005362 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05005363 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05005364 size = PAGE_SIZE - info->read;
5365 if (size > count)
5366 size = count;
5367
5368 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005369 if (ret == size) {
5370 size = -EFAULT;
5371 goto out_unlock;
5372 }
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005373 size -= ret;
5374
Steven Rostedt2cadf912008-12-01 22:20:19 -05005375 *ppos += size;
5376 info->read += size;
5377
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005378 out_unlock:
5379 mutex_unlock(&trace_types_lock);
5380
Steven Rostedt2cadf912008-12-01 22:20:19 -05005381 return size;
5382}
5383
5384static int tracing_buffers_release(struct inode *inode, struct file *file)
5385{
5386 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005387 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005388
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005389 mutex_lock(&trace_types_lock);
5390
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005391 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005392
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005393 if (info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005394 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005395 kfree(info);
5396
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005397 mutex_unlock(&trace_types_lock);
5398
Steven Rostedt2cadf912008-12-01 22:20:19 -05005399 return 0;
5400}
5401
5402struct buffer_ref {
5403 struct ring_buffer *buffer;
5404 void *page;
5405 int ref;
5406};
5407
5408static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5409 struct pipe_buffer *buf)
5410{
5411 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5412
5413 if (--ref->ref)
5414 return;
5415
5416 ring_buffer_free_read_page(ref->buffer, ref->page);
5417 kfree(ref);
5418 buf->private = 0;
5419}
5420
Steven Rostedt2cadf912008-12-01 22:20:19 -05005421static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5422 struct pipe_buffer *buf)
5423{
5424 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5425
5426 ref->ref++;
5427}
5428
5429/* Pipe buffer operations for a ring buffer page. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005430static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005431 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005432 .confirm = generic_pipe_buf_confirm,
5433 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09005434 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005435 .get = buffer_pipe_buf_get,
5436};
5437
5438/*
5439 * Callback from splice_to_pipe(), if we need to release some pages
5440 * at the end of the spd in case we errored out while filling the pipe.
5441 */
5442static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5443{
5444 struct buffer_ref *ref =
5445 (struct buffer_ref *)spd->partial[i].private;
5446
5447 if (--ref->ref)
5448 return;
5449
5450 ring_buffer_free_read_page(ref->buffer, ref->page);
5451 kfree(ref);
5452 spd->partial[i].private = 0;
5453}
5454
5455static ssize_t
5456tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5457 struct pipe_inode_info *pipe, size_t len,
5458 unsigned int flags)
5459{
5460 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005461 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02005462 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5463 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05005464 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005465 .pages = pages_def,
5466 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02005467 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005468 .flags = flags,
5469 .ops = &buffer_pipe_buf_ops,
5470 .spd_release = buffer_spd_release,
5471 };
5472 struct buffer_ref *ref;
Steven Rostedt93459c62009-04-29 00:23:13 -04005473 int entries, size, i;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005474 ssize_t ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005475
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005476 mutex_lock(&trace_types_lock);
5477
5478#ifdef CONFIG_TRACER_MAX_TRACE
5479 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5480 ret = -EBUSY;
5481 goto out;
5482 }
5483#endif
5484
5485 if (splice_grow_spd(pipe, &spd)) {
5486 ret = -ENOMEM;
5487 goto out;
5488 }
Jens Axboe35f3d142010-05-20 10:43:18 +02005489
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005490 if (*ppos & (PAGE_SIZE - 1)) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005491 ret = -EINVAL;
5492 goto out;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005493 }
5494
5495 if (len & (PAGE_SIZE - 1)) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005496 if (len < PAGE_SIZE) {
5497 ret = -EINVAL;
5498 goto out;
5499 }
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005500 len &= PAGE_MASK;
5501 }
5502
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005503 again:
5504 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005505 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04005506
Al Viroa786c062014-04-11 12:01:03 -04005507 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005508 struct page *page;
5509 int r;
5510
5511 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5512 if (!ref)
5513 break;
5514
Steven Rostedt7267fa62009-04-29 00:16:21 -04005515 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005516 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005517 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005518 if (!ref->page) {
5519 kfree(ref);
5520 break;
5521 }
5522
5523 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005524 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005525 if (r < 0) {
Vaibhav Nagarnaik7ea59062011-05-03 17:56:42 -07005526 ring_buffer_free_read_page(ref->buffer, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005527 kfree(ref);
5528 break;
5529 }
5530
5531 /*
5532 * Zero out any leftover data; this page is going to
5533 * user land.
5534 */
5535 size = ring_buffer_page_len(ref->page);
5536 if (size < PAGE_SIZE)
5537 memset(ref->page + size, 0, PAGE_SIZE - size);
5538
5539 page = virt_to_page(ref->page);
5540
5541 spd.pages[i] = page;
5542 spd.partial[i].len = PAGE_SIZE;
5543 spd.partial[i].offset = 0;
5544 spd.partial[i].private = (unsigned long)ref;
5545 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005546 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04005547
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005548 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005549 }
5550
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005551 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005552 spd.nr_pages = i;
5553
5554 /* did we read anything? */
5555 if (!spd.nr_pages) {
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005556 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005557 ret = -EAGAIN;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005558 goto out;
5559 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005560 mutex_unlock(&trace_types_lock);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005561 ret = wait_on_pipe(iter);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005562 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005563 if (ret)
5564 goto out;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005565 if (signal_pending(current)) {
5566 ret = -EINTR;
5567 goto out;
5568 }
5569 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005570 }
5571
5572 ret = splice_to_pipe(pipe, &spd);
Eric Dumazet047fe362012-06-12 15:24:40 +02005573 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02005574out:
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005575 mutex_unlock(&trace_types_lock);
5576
Steven Rostedt2cadf912008-12-01 22:20:19 -05005577 return ret;
5578}
5579
5580static const struct file_operations tracing_buffers_fops = {
5581 .open = tracing_buffers_open,
5582 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005583 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005584 .release = tracing_buffers_release,
5585 .splice_read = tracing_buffers_splice_read,
5586 .llseek = no_llseek,
5587};
5588
Steven Rostedtc8d77182009-04-29 18:03:45 -04005589static ssize_t
5590tracing_stats_read(struct file *filp, char __user *ubuf,
5591 size_t count, loff_t *ppos)
5592{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005593 struct inode *inode = file_inode(filp);
5594 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005595 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005596 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005597 struct trace_seq *s;
5598 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005599 unsigned long long t;
5600 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005601
Li Zefane4f2d102009-06-15 10:57:28 +08005602 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005603 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01005604 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005605
5606 trace_seq_init(s);
5607
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005608 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005609 trace_seq_printf(s, "entries: %ld\n", cnt);
5610
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005611 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005612 trace_seq_printf(s, "overrun: %ld\n", cnt);
5613
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005614 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005615 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5616
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005617 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005618 trace_seq_printf(s, "bytes: %ld\n", cnt);
5619
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005620 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08005621 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005622 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08005623 usec_rem = do_div(t, USEC_PER_SEC);
5624 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5625 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005626
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005627 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08005628 usec_rem = do_div(t, USEC_PER_SEC);
5629 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5630 } else {
5631 /* counter or tsc mode for trace_clock */
5632 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005633 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08005634
5635 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005636 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08005637 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005638
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005639 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07005640 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5641
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005642 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05005643 trace_seq_printf(s, "read events: %ld\n", cnt);
5644
Steven Rostedtc8d77182009-04-29 18:03:45 -04005645 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
5646
5647 kfree(s);
5648
5649 return count;
5650}
5651
5652static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005653 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005654 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005655 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005656 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005657};
5658
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005659#ifdef CONFIG_DYNAMIC_FTRACE
5660
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005661int __weak ftrace_arch_read_dyn_info(char *buf, int size)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005662{
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005663 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005664}
5665
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005666static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005667tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005668 size_t cnt, loff_t *ppos)
5669{
Steven Rostedta26a2a22008-10-31 00:03:22 -04005670 static char ftrace_dyn_info_buffer[1024];
5671 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005672 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005673 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04005674 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005675 int r;
5676
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005677 mutex_lock(&dyn_info_mutex);
5678 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005679
Steven Rostedta26a2a22008-10-31 00:03:22 -04005680 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005681 buf[r++] = '\n';
5682
5683 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5684
5685 mutex_unlock(&dyn_info_mutex);
5686
5687 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005688}
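/*
 * The handler above backs "dyn_ftrace_total_info": it prints the
 * count behind filp->private_data, then lets the architecture append
 * to the buffer via the (default no-op) ftrace_arch_read_dyn_info()
 * weak hook. Typical output is just a number, e.g. "45523".
 */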
5689
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005690static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005691 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005692 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005693 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005694};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005695#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005696
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005697#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5698static void
5699ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005700{
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005701 tracing_snapshot();
5702}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005703
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005704static void
5705ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5706{
5707 unsigned long *count = (unsigned long *)data;
5708
5709 if (!*count)
5710 return;
5711
5712 if (*count != -1)
5713 (*count)--;
5714
5715 tracing_snapshot();
5716}
5717
5718static int
5719ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5720 struct ftrace_probe_ops *ops, void *data)
5721{
5722 long count = (long)data;
5723
5724 seq_printf(m, "%ps:", (void *)ip);
5725
5726 seq_puts(m, "snapshot");
5727
5728 if (count == -1)
5729 seq_puts(m, ":unlimited\n");
5730 else
5731 seq_printf(m, ":count=%ld\n", count);
5732
5733 return 0;
5734}
5735
5736static struct ftrace_probe_ops snapshot_probe_ops = {
5737 .func = ftrace_snapshot,
5738 .print = ftrace_snapshot_print,
5739};
5740
5741static struct ftrace_probe_ops snapshot_count_probe_ops = {
5742 .func = ftrace_count_snapshot,
5743 .print = ftrace_snapshot_print,
5744};
5745
5746static int
5747ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5748 char *glob, char *cmd, char *param, int enable)
5749{
5750 struct ftrace_probe_ops *ops;
5751 void *count = (void *)-1;
5752 char *number;
5753 int ret;
5754
5755 /* hash funcs only work with set_ftrace_filter */
5756 if (!enable)
5757 return -EINVAL;
5758
5759 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5760
5761 if (glob[0] == '!') {
5762 unregister_ftrace_function_probe_func(glob+1, ops);
5763 return 0;
5764 }
5765
5766 if (!param)
5767 goto out_reg;
5768
5769 number = strsep(&param, ":");
5770
5771 if (!strlen(number))
5772 goto out_reg;
5773
5774 /*
5775 * We use the callback data field (which is a pointer)
5776 * as our counter.
5777 */
5778 ret = kstrtoul(number, 0, (unsigned long *)&count);
5779 if (ret)
5780 return ret;
5781
5782 out_reg:
5783 ret = register_ftrace_function_probe(glob, ops, count);
5784
5785 if (ret >= 0)
5786 alloc_snapshot(&global_trace);
5787
5788 return ret < 0 ? ret : 0;
5789}
5790
5791static struct ftrace_func_command ftrace_snapshot_cmd = {
5792 .name = "snapshot",
5793 .func = ftrace_trace_snapshot_callback,
5794};
5795
Tom Zanussi38de93a2013-10-24 08:34:18 -05005796static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005797{
5798 return register_ftrace_command(&ftrace_snapshot_cmd);
5799}
5800#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05005801static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005802#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005803
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005804struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005805{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005806 if (tr->dir)
5807 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005808
Frederic Weisbecker3e1f60b2009-03-22 23:10:45 +01005809 if (!debugfs_initialized())
5810 return NULL;
5811
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005812 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5813 tr->dir = debugfs_create_dir("tracing", NULL);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005814
zhangwei(Jovi)687c8782013-03-11 15:13:29 +08005815 if (!tr->dir)
5816 pr_warn_once("Could not create debugfs directory 'tracing'\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005817
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005818 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005819}
5820
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005821struct dentry *tracing_init_dentry(void)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005822{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005823 return tracing_init_dentry_tr(&global_trace);
5824}
5825
5826static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5827{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005828 struct dentry *d_tracer;
5829
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005830 if (tr->percpu_dir)
5831 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005832
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005833 d_tracer = tracing_init_dentry_tr(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005834 if (!d_tracer)
5835 return NULL;
5836
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005837 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005838
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005839 WARN_ONCE(!tr->percpu_dir,
5840 "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005841
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005842 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005843}
5844
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005845static struct dentry *
5846trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5847 void *data, long cpu, const struct file_operations *fops)
5848{
5849 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5850
5851 if (ret) /* See tracing_get_cpu() */
5852 ret->d_inode->i_cdev = (void *)(cpu + 1);
5853 return ret;
5854}
5855
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005856static void
5857tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005858{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005859 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005860 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04005861 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005862
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09005863 if (!d_percpu)
5864 return;
5865
Steven Rostedtdd49a382010-10-20 21:51:26 -04005866 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005867 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5868 if (!d_cpu) {
5869 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5870 return;
5871 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005872
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005873 /* per cpu trace_pipe */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005874 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02005875 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005876
5877 /* per cpu trace */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005878 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005879 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04005880
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005881 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005882 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005883
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005884 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005885 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005886
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005887 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005888 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005889
5890#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005891 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005892 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005893
Oleg Nesterov649e9c702013-07-23 17:25:54 +02005894 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005895 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005896#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005897}
5898
Steven Rostedt60a11772008-05-12 21:20:44 +02005899#ifdef CONFIG_FTRACE_SELFTEST
5900/* Let selftest have access to static functions in this file */
5901#include "trace_selftest.c"
5902#endif
5903
Steven Rostedt577b7852009-02-26 23:43:05 -05005904struct trace_option_dentry {
5905 struct tracer_opt *opt;
5906 struct tracer_flags *flags;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005907 struct trace_array *tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05005908 struct dentry *entry;
5909};
5910
5911static ssize_t
5912trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5913 loff_t *ppos)
5914{
5915 struct trace_option_dentry *topt = filp->private_data;
5916 char *buf;
5917
5918 if (topt->flags->val & topt->opt->bit)
5919 buf = "1\n";
5920 else
5921 buf = "0\n";
5922
5923 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5924}
5925
5926static ssize_t
5927trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5928 loff_t *ppos)
5929{
5930 struct trace_option_dentry *topt = filp->private_data;
5931 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05005932 int ret;
5933
Peter Huewe22fe9b52011-06-07 21:58:27 +02005934 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5935 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05005936 return ret;
5937
Li Zefan8d18eaa2009-12-08 11:17:06 +08005938 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05005939 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08005940
5941 if (!!(topt->flags->val & topt->opt->bit) != val) {
5942 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005943 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05005944 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08005945 mutex_unlock(&trace_types_lock);
5946 if (ret)
5947 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05005948 }
5949
5950 *ppos += cnt;
5951
5952 return cnt;
5953}
5954
5955
5956static const struct file_operations trace_options_fops = {
5957 .open = tracing_open_generic,
5958 .read = trace_options_read,
5959 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005960 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05005961};
5962
Steven Rostedta8259072009-02-26 22:19:12 -05005963static ssize_t
5964trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5965 loff_t *ppos)
5966{
5967 long index = (long)filp->private_data;
5968 char *buf;
5969
5970 if (trace_flags & (1 << index))
5971 buf = "1\n";
5972 else
5973 buf = "0\n";
5974
5975 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5976}
5977
5978static ssize_t
5979trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5980 loff_t *ppos)
5981{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005982 struct trace_array *tr = &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05005983 long index = (long)filp->private_data;
Steven Rostedta8259072009-02-26 22:19:12 -05005984 unsigned long val;
5985 int ret;
5986
Peter Huewe22fe9b52011-06-07 21:58:27 +02005987 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5988 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05005989 return ret;
5990
Zhaoleif2d84b62009-08-07 18:55:48 +08005991 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05005992 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005993
5994 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005995 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005996 mutex_unlock(&trace_types_lock);
Steven Rostedta8259072009-02-26 22:19:12 -05005997
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005998 if (ret < 0)
5999 return ret;
6000
Steven Rostedta8259072009-02-26 22:19:12 -05006001 *ppos += cnt;
6002
6003 return cnt;
6004}
6005
Steven Rostedta8259072009-02-26 22:19:12 -05006006static const struct file_operations trace_options_core_fops = {
6007 .open = tracing_open_generic,
6008 .read = trace_options_core_read,
6009 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006010 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05006011};
6012
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006013struct dentry *trace_create_file(const char *name,
Al Virof4ae40a62011-07-24 04:33:43 -04006014 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006015 struct dentry *parent,
6016 void *data,
6017 const struct file_operations *fops)
6018{
6019 struct dentry *ret;
6020
6021 ret = debugfs_create_file(name, mode, parent, data, fops);
6022 if (!ret)
6023 pr_warning("Could not create debugfs '%s' entry\n", name);
6024
6025 return ret;
6026}
6027
6028
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006029static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05006030{
6031 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05006032
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006033 if (tr->options)
6034 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05006035
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006036 d_tracer = tracing_init_dentry_tr(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006037 if (!d_tracer)
6038 return NULL;
6039
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006040 tr->options = debugfs_create_dir("options", d_tracer);
6041 if (!tr->options) {
Steven Rostedta8259072009-02-26 22:19:12 -05006042 pr_warning("Could not create debugfs directory 'options'\n");
6043 return NULL;
6044 }
6045
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006046 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05006047}
6048
Steven Rostedt577b7852009-02-26 23:43:05 -05006049static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006050create_trace_option_file(struct trace_array *tr,
6051 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006052 struct tracer_flags *flags,
6053 struct tracer_opt *opt)
6054{
6055 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05006056
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006057 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05006058 if (!t_options)
6059 return;
6060
6061 topt->flags = flags;
6062 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006063 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05006064
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006065 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006066 &trace_options_fops);
6067
Steven Rostedt577b7852009-02-26 23:43:05 -05006068}
6069
6070static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006071create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05006072{
6073 struct trace_option_dentry *topts;
6074 struct tracer_flags *flags;
6075 struct tracer_opt *opts;
6076 int cnt;
6077
6078 if (!tracer)
6079 return NULL;
6080
6081 flags = tracer->flags;
6082
6083 if (!flags || !flags->opts)
6084 return NULL;
6085
6086 opts = flags->opts;
6087
6088 for (cnt = 0; opts[cnt].name; cnt++)
6089 ;
6090
Steven Rostedt0cfe8242009-02-27 10:51:10 -05006091 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05006092 if (!topts)
6093 return NULL;
6094
6095 for (cnt = 0; opts[cnt].name; cnt++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006096 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05006097 &opts[cnt]);
6098
6099 return topts;
6100}
6101
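/* Remove the debugfs entries created above and free the array. */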
6102static void
6103destroy_trace_option_files(struct trace_option_dentry *topts)
6104{
6105 int cnt;
6106
6107 if (!topts)
6108 return;
6109
6110 for (cnt = 0; topts[cnt].opt; cnt++) {
6111 if (topts[cnt].entry)
6112 debugfs_remove(topts[cnt].entry);
6113 }
6114
6115 kfree(topts);
6116}
6117
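/*
 * Create a file for one of the core trace_options flags. The flag's
 * bit position is passed as the file's private data, which
 * trace_options_core_write() turns back into a 1 << index mask.
 */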
Steven Rostedta8259072009-02-26 22:19:12 -05006118static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006119create_trace_option_core_file(struct trace_array *tr,
6120 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05006121{
6122 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05006123
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006124 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006125 if (!t_options)
6126 return NULL;
6127
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006128 return trace_create_file(option, 0644, t_options, (void *)index,
Steven Rostedta8259072009-02-26 22:19:12 -05006129 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05006130}
6131
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006132static __init void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05006133{
6134 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05006135 int i;
6136
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006137 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006138 if (!t_options)
6139 return;
6140
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006141 for (i = 0; trace_options[i]; i++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006142 create_trace_option_core_file(tr, trace_options[i], i);
Steven Rostedta8259072009-02-26 22:19:12 -05006143}
6144
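/*
 * Handlers for the "tracing_on" file: reading reports whether the
 * ring buffer is currently recording, and writing 0 or 1 stops or
 * resumes recording, invoking the current tracer's stop()/start()
 * callbacks as well. For example:
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on
 */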
Steven Rostedt499e5472012-02-22 15:50:28 -05006145static ssize_t
6146rb_simple_read(struct file *filp, char __user *ubuf,
6147 size_t cnt, loff_t *ppos)
6148{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04006149 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05006150 char buf[64];
6151 int r;
6152
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006153 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05006154 r = sprintf(buf, "%d\n", r);
6155
6156 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6157}
6158
6159static ssize_t
6160rb_simple_write(struct file *filp, const char __user *ubuf,
6161 size_t cnt, loff_t *ppos)
6162{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04006163 struct trace_array *tr = filp->private_data;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006164 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05006165 unsigned long val;
6166 int ret;
6167
6168 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6169 if (ret)
6170 return ret;
6171
6172 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006173 mutex_lock(&trace_types_lock);
6174 if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006175 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006176 if (tr->current_trace->start)
6177 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006178 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006179 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006180 if (tr->current_trace->stop)
6181 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006182 }
6183 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05006184 }
6185
6186 (*ppos)++;
6187
6188 return cnt;
6189}
6190
6191static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006192 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05006193 .read = rb_simple_read,
6194 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006195 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05006196 .llseek = default_llseek,
6197};
6198
Steven Rostedt277ba042012-08-03 16:10:49 -04006199struct dentry *trace_instance_dir;
6200
6201static void
6202init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
6203
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006204static int
6205allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04006206{
6207 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006208
6209 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6210
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05006211 buf->tr = tr;
6212
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006213 buf->buffer = ring_buffer_alloc(size, rb_flags);
6214 if (!buf->buffer)
6215 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006216
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006217 buf->data = alloc_percpu(struct trace_array_cpu);
6218 if (!buf->data) {
6219 ring_buffer_free(buf->buffer);
6220 return -ENOMEM;
6221 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006222
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006223 /* Allocate the first page for all buffers */
6224 set_buffer_entries(&tr->trace_buffer,
6225 ring_buffer_size(tr->trace_buffer.buffer, 0));
6226
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006227 return 0;
6228}
6229
6230static int allocate_trace_buffers(struct trace_array *tr, int size)
6231{
6232 int ret;
6233
6234 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6235 if (ret)
6236 return ret;
6237
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006238#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006239 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6240 allocate_snapshot ? size : 1);
6241 if (WARN_ON(ret)) {
6242 ring_buffer_free(tr->trace_buffer.buffer);
6243 free_percpu(tr->trace_buffer.data);
6244 return -ENOMEM;
6245 }
6246 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006247
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006248 /*
6249 * Only the top level trace array gets its snapshot allocated
6250 * from the kernel command line.
6251 */
6252 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006253#endif
6254 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006255}
6256
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04006257static void free_trace_buffer(struct trace_buffer *buf)
6258{
6259 if (buf->buffer) {
6260 ring_buffer_free(buf->buffer);
6261 buf->buffer = NULL;
6262 free_percpu(buf->data);
6263 buf->data = NULL;
6264 }
6265}
6266
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04006267static void free_trace_buffers(struct trace_array *tr)
6268{
6269 if (!tr)
6270 return;
6271
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04006272 free_trace_buffer(&tr->trace_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04006273
6274#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04006275 free_trace_buffer(&tr->max_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04006276#endif
6277}
6278
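/*
 * Create a new named trace_array instance with its own buffers,
 * cpumask, event directory and debugfs tree, and register it on
 * ftrace_trace_arrays. Fails with -EEXIST if the name is already
 * taken.
 */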
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006279static int new_instance_create(const char *name)
6280{
Steven Rostedt277ba042012-08-03 16:10:49 -04006281 struct trace_array *tr;
6282 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04006283
6284 mutex_lock(&trace_types_lock);
6285
6286 ret = -EEXIST;
6287 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6288 if (tr->name && strcmp(tr->name, name) == 0)
6289 goto out_unlock;
6290 }
6291
6292 ret = -ENOMEM;
6293 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6294 if (!tr)
6295 goto out_unlock;
6296
6297 tr->name = kstrdup(name, GFP_KERNEL);
6298 if (!tr->name)
6299 goto out_free_tr;
6300
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006301 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6302 goto out_free_tr;
6303
6304 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6305
Steven Rostedt277ba042012-08-03 16:10:49 -04006306 raw_spin_lock_init(&tr->start_lock);
6307
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05006308 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6309
Steven Rostedt277ba042012-08-03 16:10:49 -04006310 tr->current_trace = &nop_trace;
6311
6312 INIT_LIST_HEAD(&tr->systems);
6313 INIT_LIST_HEAD(&tr->events);
6314
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006315 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04006316 goto out_free_tr;
6317
Steven Rostedt277ba042012-08-03 16:10:49 -04006318 tr->dir = debugfs_create_dir(name, trace_instance_dir);
6319 if (!tr->dir)
6320 goto out_free_tr;
6321
6322 ret = event_trace_add_tracer(tr->dir, tr);
Alexander Z Lam609e85a2013-07-10 17:34:34 -07006323 if (ret) {
6324 debugfs_remove_recursive(tr->dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04006325 goto out_free_tr;
Alexander Z Lam609e85a2013-07-10 17:34:34 -07006326 }
Steven Rostedt277ba042012-08-03 16:10:49 -04006327
6328 init_tracer_debugfs(tr, tr->dir);
6329
6330 list_add(&tr->list, &ftrace_trace_arrays);
6331
6332 mutex_unlock(&trace_types_lock);
6333
6334 return 0;
6335
6336 out_free_tr:
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04006337 free_trace_buffers(tr);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006338 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04006339 kfree(tr->name);
6340 kfree(tr);
6341
6342 out_unlock:
6343 mutex_unlock(&trace_types_lock);
6344
6345 return ret;
6346
6347}
6348
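/*
 * Tear down a named instance. Returns -ENODEV if no instance by that
 * name exists, and refuses with -EBUSY while the instance is still
 * referenced.
 */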
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006349static int instance_delete(const char *name)
6350{
6351 struct trace_array *tr;
6352 int found = 0;
6353 int ret;
6354
6355 mutex_lock(&trace_types_lock);
6356
6357 ret = -ENODEV;
6358 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6359 if (tr->name && strcmp(tr->name, name) == 0) {
6360 found = 1;
6361 break;
6362 }
6363 }
6364 if (!found)
6365 goto out_unlock;
6366
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006367 ret = -EBUSY;
6368 if (tr->ref)
6369 goto out_unlock;
6370
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006371 list_del(&tr->list);
6372
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05006373 tracing_set_nop(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006374 event_trace_del_tracer(tr);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05006375 ftrace_destroy_function_files(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006376 debugfs_remove_recursive(tr->dir);
Steven Rostedt (Red Hat)a9fcaaa2014-06-06 23:17:28 -04006377 free_trace_buffers(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006378
6379 kfree(tr->name);
6380 kfree(tr);
6381
6382 ret = 0;
6383
6384 out_unlock:
6385 mutex_unlock(&trace_types_lock);
6386
6387 return ret;
6388}
6389
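/*
 * Backing operation for mkdir in the "instances" directory, which is
 * what lets a new trace instance be created from userspace with a
 * plain (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	mkdir /sys/kernel/debug/tracing/instances/foo
 */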
Steven Rostedt277ba042012-08-03 16:10:49 -04006390static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
6391{
6392 struct dentry *parent;
6393 int ret;
6394
6395 /* Paranoid: Make sure the parent is the "instances" directory */
6396 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6397 if (WARN_ON_ONCE(parent != trace_instance_dir))
6398 return -ENOENT;
6399
6400 /*
6401 * The inode mutex is locked, but debugfs_create_dir() will also
6402	 * take the mutex. As the instances directory cannot be destroyed
6403	 * or changed in any other way, it is safe to unlock it, and
6404	 * let the dentry try. If two users try to make the same dir at
6405	 * the same time, then new_instance_create() will determine the
6406 * winner.
6407 */
6408 mutex_unlock(&inode->i_mutex);
6409
6410 ret = new_instance_create(dentry->d_iname);
6411
6412 mutex_lock(&inode->i_mutex);
6413
6414 return ret;
6415}
6416
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006417static int instance_rmdir(struct inode *inode, struct dentry *dentry)
6418{
6419 struct dentry *parent;
6420 int ret;
6421
6422 /* Paranoid: Make sure the parent is the "instances" directory */
6423 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6424 if (WARN_ON_ONCE(parent != trace_instance_dir))
6425 return -ENOENT;
6426
6427 /* The caller did a dget() on dentry */
6428 mutex_unlock(&dentry->d_inode->i_mutex);
6429
6430 /*
6431	 * The inode mutex is locked, but debugfs_remove_recursive() will
6432	 * also take the mutex. As the instances directory cannot be
6433	 * destroyed or changed in any other way, it is safe to unlock it,
6434	 * and let the dentry try. If two users try to remove the same dir
6435	 * at the same time, then instance_delete() will determine the
6436 * winner.
6437 */
6438 mutex_unlock(&inode->i_mutex);
6439
6440 ret = instance_delete(dentry->d_iname);
6441
6442 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
6443 mutex_lock(&dentry->d_inode->i_mutex);
6444
6445 return ret;
6446}
6447
Steven Rostedt277ba042012-08-03 16:10:49 -04006448static const struct inode_operations instance_dir_inode_operations = {
6449 .lookup = simple_lookup,
6450 .mkdir = instance_mkdir,
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006451 .rmdir = instance_rmdir,
Steven Rostedt277ba042012-08-03 16:10:49 -04006452};
6453
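/*
 * Create the "instances" directory and point its inode at the
 * mkdir/rmdir operations above, so userspace can create and remove
 * tracing instances with ordinary directory syscalls.
 */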
6454static __init void create_trace_instances(struct dentry *d_tracer)
6455{
6456 trace_instance_dir = debugfs_create_dir("instances", d_tracer);
6457 if (WARN_ON(!trace_instance_dir))
6458 return;
6459
6460 /* Hijack the dir inode operations, to allow mkdir */
6461 trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
6462}
6463
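/*
 * Populate the per-instance debugfs files (trace, trace_pipe,
 * buffer_size_kb, tracing_on, etc.). This is used both for the top
 * level tracing directory and for each instance's subdirectory.
 */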
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006464static void
6465init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6466{
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05006467 int cpu;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006468
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05006469 trace_create_file("available_tracers", 0444, d_tracer,
6470 tr, &show_traces_fops);
6471
6472 trace_create_file("current_tracer", 0644, d_tracer,
6473 tr, &set_tracer_fops);
6474
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006475 trace_create_file("tracing_cpumask", 0644, d_tracer,
6476 tr, &tracing_cpumask_fops);
6477
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006478 trace_create_file("trace_options", 0644, d_tracer,
6479 tr, &tracing_iter_fops);
6480
6481 trace_create_file("trace", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006482 tr, &tracing_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006483
6484 trace_create_file("trace_pipe", 0444, d_tracer,
Oleg Nesterov15544202013-07-23 17:25:57 +02006485 tr, &tracing_pipe_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006486
6487 trace_create_file("buffer_size_kb", 0644, d_tracer,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006488 tr, &tracing_entries_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006489
6490 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6491 tr, &tracing_total_entries_fops);
6492
Wang YanQing238ae932013-05-26 16:52:01 +08006493 trace_create_file("free_buffer", 0200, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006494 tr, &tracing_free_buffer_fops);
6495
6496 trace_create_file("trace_marker", 0220, d_tracer,
6497 tr, &tracing_mark_fops);
6498
6499 trace_create_file("trace_clock", 0644, d_tracer, tr,
6500 &trace_clock_fops);
6501
6502 trace_create_file("tracing_on", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006503 tr, &rb_simple_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006504
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05006505#ifdef CONFIG_TRACER_MAX_TRACE
6506 trace_create_file("tracing_max_latency", 0644, d_tracer,
6507 &tr->max_latency, &tracing_max_lat_fops);
6508#endif
6509
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05006510 if (ftrace_create_function_files(tr, d_tracer))
6511 WARN(1, "Could not allocate function filter files");
6512
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006513#ifdef CONFIG_TRACER_SNAPSHOT
6514 trace_create_file("snapshot", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006515 tr, &snapshot_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006516#endif
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05006517
6518 for_each_tracing_cpu(cpu)
6519 tracing_init_debugfs_percpu(tr, cpu);
6520
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006521}
6522
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01006523static __init int tracer_init_debugfs(void)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006524{
6525 struct dentry *d_tracer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006526
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006527 trace_access_lock_init();
6528
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006529 d_tracer = tracing_init_dentry();
Namhyung Kimed6f1c92013-04-10 09:18:12 +09006530 if (!d_tracer)
6531 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006532
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006533 init_tracer_debugfs(&global_trace, d_tracer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006534
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006535 trace_create_file("tracing_thresh", 0644, d_tracer,
6536 &tracing_thresh, &tracing_max_lat_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006537
Li Zefan339ae5d2009-04-17 10:34:30 +08006538 trace_create_file("README", 0444, d_tracer,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006539 NULL, &tracing_readme_fops);
Ingo Molnar7bd2f242008-05-12 21:20:45 +02006540
Avadh Patel69abe6a2009-04-10 16:04:48 -04006541 trace_create_file("saved_cmdlines", 0444, d_tracer,
6542 NULL, &tracing_saved_cmdlines_fops);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006543
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09006544 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6545 NULL, &tracing_saved_cmdlines_size_fops);
6546
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006547#ifdef CONFIG_DYNAMIC_FTRACE
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006548 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6549 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006550#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006551
Steven Rostedt277ba042012-08-03 16:10:49 -04006552 create_trace_instances(d_tracer);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006553
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006554 create_trace_options_dir(&global_trace);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006555
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01006556 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006557}
6558
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006559static int trace_panic_handler(struct notifier_block *this,
6560 unsigned long event, void *unused)
6561{
Steven Rostedt944ac422008-10-23 19:26:08 -04006562 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006563 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006564 return NOTIFY_OK;
6565}
6566
6567static struct notifier_block trace_panic_notifier = {
6568 .notifier_call = trace_panic_handler,
6569 .next = NULL,
6570 .priority = 150 /* priority: INT_MAX >= x >= 0 */
6571};
6572
6573static int trace_die_handler(struct notifier_block *self,
6574 unsigned long val,
6575 void *data)
6576{
6577 switch (val) {
6578 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04006579 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006580 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006581 break;
6582 default:
6583 break;
6584 }
6585 return NOTIFY_OK;
6586}
6587
6588static struct notifier_block trace_die_notifier = {
6589 .notifier_call = trace_die_handler,
6590 .priority = 200
6591};
6592
6593/*
6594 * printk is capped at 1024 characters; we really don't need it that big.
6595 * Nothing should be printing 1000 characters anyway.
6596 */
6597#define TRACE_MAX_PRINT 1000
6598
6599/*
6600 * Define KERN_TRACE here so that we have one place to modify
6601 * it if we decide to change what log level the ftrace dump
6602 * should be at.
6603 */
Steven Rostedt428aee12009-01-14 12:24:42 -05006604#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006605
Jason Wessel955b61e2010-08-05 09:22:23 -05006606void
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006607trace_printk_seq(struct trace_seq *s)
6608{
6609 /* Probably should print a warning here. */
zhangwei(Jovi)bd6df182013-03-11 15:13:37 +08006610 if (s->len >= TRACE_MAX_PRINT)
6611 s->len = TRACE_MAX_PRINT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006612
6613	/* should be zero terminated, but we are paranoid. */
6614 s->buffer[s->len] = 0;
6615
6616 printk(KERN_TRACE "%s", s->buffer);
6617
Steven Rostedtf9520752009-03-02 14:04:40 -05006618 trace_seq_init(s);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006619}
6620
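/*
 * Initialize an iterator over the global trace buffer for contexts
 * that have no open file behind them, such as the dump-on-oops path
 * below.
 */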
Jason Wessel955b61e2010-08-05 09:22:23 -05006621void trace_init_global_iter(struct trace_iterator *iter)
6622{
6623 iter->tr = &global_trace;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006624 iter->trace = iter->tr->current_trace;
Steven Rostedtae3b5092013-01-23 15:22:59 -05006625 iter->cpu_file = RING_BUFFER_ALL_CPUS;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006626 iter->trace_buffer = &global_trace.trace_buffer;
Cody P Schaferb2f974d2013-10-23 11:49:57 -07006627
6628 if (iter->trace && iter->trace->open)
6629 iter->trace->open(iter);
6630
6631 /* Annotate start of buffers if we had overruns */
6632 if (ring_buffer_overruns(iter->trace_buffer->buffer))
6633 iter->iter_flags |= TRACE_FILE_ANNOTATE;
6634
6635 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6636 if (trace_clocks[iter->tr->clock_id].in_ns)
6637 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
Jason Wessel955b61e2010-08-05 09:22:23 -05006638}
6639
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006640void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006641{
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006642 /* use static because iter can be a bit big for the stack */
6643 static struct trace_iterator iter;
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006644 static atomic_t dump_running;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006645 unsigned int old_userobj;
Steven Rostedtd7690412008-10-01 00:29:53 -04006646 unsigned long flags;
6647 int cnt = 0, cpu;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006648
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006649 /* Only allow one dump user at a time. */
6650 if (atomic_inc_return(&dump_running) != 1) {
6651 atomic_dec(&dump_running);
6652 return;
Steven Rostedte0a413f2011-09-29 21:26:16 -04006653 }
6654
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006655 /*
6656 * Always turn off tracing when we dump.
6657 * We don't need to show trace output of what happens
6658 * between multiple crashes.
6659 *
6660 * If the user does a sysrq-z, then they can re-enable
6661 * tracing with echo 1 > tracing_on.
6662 */
6663 tracing_off();
6664
6665 local_irq_save(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006666
Jovi Zhang38dbe0b2013-01-25 18:03:07 +08006667 /* Simulate the iterator */
Jason Wessel955b61e2010-08-05 09:22:23 -05006668 trace_init_global_iter(&iter);
6669
Steven Rostedtd7690412008-10-01 00:29:53 -04006670 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006671 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -04006672 }
6673
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006674 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6675
Török Edwinb54d3de2008-11-22 13:28:48 +02006676 /* don't look at user memory in panic mode */
6677 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6678
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006679 switch (oops_dump_mode) {
6680 case DUMP_ALL:
Steven Rostedtae3b5092013-01-23 15:22:59 -05006681 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006682 break;
6683 case DUMP_ORIG:
6684 iter.cpu_file = raw_smp_processor_id();
6685 break;
6686 case DUMP_NONE:
6687 goto out_enable;
6688 default:
6689 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
Steven Rostedtae3b5092013-01-23 15:22:59 -05006690 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006691 }
6692
6693 printk(KERN_TRACE "Dumping ftrace buffer:\n");
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006694
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006695 /* Did function tracer already get disabled? */
6696 if (ftrace_is_dead()) {
6697 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6698 printk("# MAY BE MISSING FUNCTION EVENTS\n");
6699 }
6700
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006701 /*
6702	 * We need to stop all tracing on all CPUs to read
6703	 * the next buffer. This is a bit expensive, but is
6704	 * not done often. We read everything we can,
6705 * and then release the locks again.
6706 */
6707
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006708 while (!trace_empty(&iter)) {
6709
6710 if (!cnt)
6711 printk(KERN_TRACE "---------------------------------\n");
6712
6713 cnt++;
6714
6715 /* reset all but tr, trace, and overruns */
6716 memset(&iter.seq, 0,
6717 sizeof(struct trace_iterator) -
6718 offsetof(struct trace_iterator, seq));
6719 iter.iter_flags |= TRACE_FILE_LAT_FMT;
6720 iter.pos = -1;
6721
Jason Wessel955b61e2010-08-05 09:22:23 -05006722 if (trace_find_next_entry_inc(&iter) != NULL) {
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08006723 int ret;
6724
6725 ret = print_trace_line(&iter);
6726 if (ret != TRACE_TYPE_NO_CONSUME)
6727 trace_consume(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006728 }
Steven Rostedtb892e5c2012-03-01 22:06:48 -05006729 touch_nmi_watchdog();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006730
6731 trace_printk_seq(&iter.seq);
6732 }
6733
6734 if (!cnt)
6735 printk(KERN_TRACE " (ftrace buffer empty)\n");
6736 else
6737 printk(KERN_TRACE "---------------------------------\n");
6738
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006739 out_enable:
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006740 trace_flags |= old_userobj;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006741
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006742 for_each_tracing_cpu(cpu) {
6743 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006744 }
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006745 atomic_dec(&dump_running);
Steven Rostedtcd891ae2009-04-28 11:39:34 -04006746 local_irq_restore(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006747}
Paul E. McKenneya8eecf22011-10-02 11:01:15 -07006748EXPORT_SYMBOL_GPL(ftrace_dump);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006749
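/*
 * Called via early_initcall(): allocate the cpumasks, trace_printk
 * buffers and the global ring buffer, install the nop tracer, and
 * register the panic and die notifiers so the trace buffer can be
 * dumped when the kernel crashes.
 */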
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006750__init static int tracer_alloc_buffers(void)
6751{
Steven Rostedt73c51622009-03-11 13:42:01 -04006752 int ring_buf_size;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306753 int ret = -ENOMEM;
6754
David Sharp750912f2010-12-08 13:46:47 -08006755
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306756 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6757 goto out;
6758
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006759 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306760 goto out_free_buffer_mask;
6761
Steven Rostedt07d777f2011-09-22 14:01:55 -04006762 /* Only allocate trace_printk buffers if a trace_printk exists */
6763 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
Steven Rostedt81698832012-10-11 10:15:05 -04006764 /* Must be called before global_trace.buffer is allocated */
Steven Rostedt07d777f2011-09-22 14:01:55 -04006765 trace_printk_init_buffers();
6766
Steven Rostedt73c51622009-03-11 13:42:01 -04006767 /* To save memory, keep the ring buffer size to its minimum */
6768 if (ring_buffer_expanded)
6769 ring_buf_size = trace_buf_size;
6770 else
6771 ring_buf_size = 1;
6772
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306773 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006774 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006775
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006776 raw_spin_lock_init(&global_trace.start_lock);
6777
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04006778 /* Used for event triggers */
6779 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
6780 if (!temp_buffer)
6781 goto out_free_cpumask;
6782
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09006783 if (trace_create_savedcmd() < 0)
6784 goto out_free_temp_buffer;
6785
Steven Rostedtab464282008-05-12 21:21:00 +02006786	/* TODO: make the number of buffers hot pluggable with CPUs */
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006787 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04006788 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6789 WARN_ON(1);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09006790 goto out_free_savedcmd;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04006791 }
Steven Rostedta7603ff2012-08-06 16:24:11 -04006792
Steven Rostedt499e5472012-02-22 15:50:28 -05006793 if (global_trace.buffer_disabled)
6794 tracing_off();
Steven Rostedt3928a8a2008-09-29 23:02:41 -04006795
Steven Rostedte1e232c2014-02-10 23:38:46 -05006796 if (trace_boot_clock) {
6797 ret = tracing_set_clock(&global_trace, trace_boot_clock);
6798 if (ret < 0)
6799 pr_warning("Trace clock %s not defined, going back to default\n",
6800 trace_boot_clock);
6801 }
6802
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04006803 /*
6804 * register_tracer() might reference current_trace, so it
6805 * needs to be set before we register anything. This is
6806 * just a bootstrap of current_trace anyway.
6807 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006808 global_trace.current_trace = &nop_trace;
6809
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05006810 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6811
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05006812 ftrace_init_global_array_ops(&global_trace);
6813
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04006814 register_tracer(&nop_trace);
6815
Steven Rostedt60a11772008-05-12 21:20:44 +02006816 /* All seems OK, enable tracing */
6817 tracing_disabled = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04006818
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006819 atomic_notifier_chain_register(&panic_notifier_list,
6820 &trace_panic_notifier);
6821
6822 register_die_notifier(&trace_die_notifier);
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01006823
Steven Rostedtae63b31e2012-05-03 23:09:03 -04006824 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6825
6826 INIT_LIST_HEAD(&global_trace.systems);
6827 INIT_LIST_HEAD(&global_trace.events);
6828 list_add(&global_trace.list, &ftrace_trace_arrays);
6829
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04006830 while (trace_boot_options) {
6831 char *option;
6832
6833 option = strsep(&trace_boot_options, ",");
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006834 trace_set_options(&global_trace, option);
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04006835 }
6836
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006837 register_snapshot_cmd();
6838
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01006839 return 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006840
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09006841out_free_savedcmd:
6842 free_saved_cmdlines_buffer(savedcmd);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04006843out_free_temp_buffer:
6844 ring_buffer_free(temp_buffer);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306845out_free_cpumask:
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006846 free_cpumask_var(global_trace.tracing_cpumask);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306847out_free_buffer_mask:
6848 free_cpumask_var(tracing_buffer_mask);
6849out:
6850 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006851}
Steven Rostedtb2821ae2009-02-02 21:38:32 -05006852
6853__init static int clear_boot_tracer(void)
6854{
6855 /*
6856 * The default bootup tracer buffer lives in an init section.
6857 * This function is called at late_initcall time. If we did not
6858 * find the boot tracer, then clear it out, to prevent
6859 * later registration from accessing the buffer that is
6860 * about to be freed.
6861 */
6862 if (!default_bootup_tracer)
6863 return 0;
6864
6865 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6866 default_bootup_tracer);
6867 default_bootup_tracer = NULL;
6868
6869 return 0;
6870}
6871
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01006872early_initcall(tracer_alloc_buffers);
6873fs_initcall(tracer_init_debugfs);
Steven Rostedtb2821ae2009-02-02 21:38:32 -05006874late_initcall(clear_boot_tracer);