/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
        { }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set to 1 if you want to dump buffers of all CPUs
 * Set to 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
        struct module                   *mod;
        unsigned long                   length;
};

union trace_enum_map_item;

struct trace_enum_map_tail {
        /*
         * "end" is first and points to NULL as it must be different
         * from "mod" or "enum_string"
         */
        union trace_enum_map_item       *next;
        const char                      *end;   /* points to NULL */
};

static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
union trace_enum_map_item {
        struct trace_enum_map           map;
        struct trace_enum_map_head      head;
        struct trace_enum_map_tail      tail;
};

static union trace_enum_map_item *trace_enum_maps;
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

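/*
 * For illustration, a sketch of the layout described above, assuming
 * two saved maps (not checked against a live kernel):
 *
 *   trace_enum_maps: [ head | map 0 | map 1 | tail ]
 *                      |                       |
 *                      head.length == 2        tail.next --> next saved array
 */
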
static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE         100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
        strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
        /* We are using ftrace early, expand it */
        ring_buffer_expanded = true;
        return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
        if (*str++ != '=' || !*str) {
                ftrace_dump_on_oops = DUMP_ALL;
                return 1;
        }

        if (!strcmp("orig_cpu", str)) {
                ftrace_dump_on_oops = DUMP_ORIG;
                return 1;
        }

        return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

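/*
 * For illustration, a usage sketch based on the parsing above: booting
 * with "ftrace_dump_on_oops=orig_cpu" selects DUMP_ORIG (dump only the
 * oopsing CPU's buffer), while a bare "ftrace_dump_on_oops" selects
 * DUMP_ALL. The same knob can be flipped at run time, e.g.:
 *
 *      echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */
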
static int __init stop_trace_on_warning(char *str)
{
        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                __disable_trace_on_warning = 1;
        return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
        allocate_snapshot = true;
        /* We also need the main ring buffer expanded */
        ring_buffer_expanded = true;
        return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
        strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
        return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
        strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
        trace_boot_clock = trace_boot_clock_buf;
        return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                tracepoint_printk = 1;
        return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}

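/*
 * A worked example of the rounding above: ns2usecs(1499) computes
 * (1499 + 500) / 1000 == 1, so 1.499 usecs rounds down to 1, while
 * ns2usecs(1500) computes 2000 / 1000 == 2, rounding half up.
 */
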
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS                                             \
        (FUNCTION_DEFAULT_FLAGS |                                       \
         TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |                  \
         TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |                \
         TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |                 \
         TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS   (TRACE_ITER_PRINTK |                    \
               TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
        TRACE_ITER_EVENT_FORK

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
        .trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
        struct trace_array *tr;
        int ret = -ENODEV;

        mutex_lock(&trace_types_lock);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                if (tr == this_tr) {
                        tr->ref++;
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&trace_types_lock);

        return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
        WARN_ON(!this_tr->ref);
        this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
        mutex_lock(&trace_types_lock);
        __trace_array_put(this_tr);
        mutex_unlock(&trace_types_lock);
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
                              struct ring_buffer *buffer,
                              struct ring_buffer_event *event)
{
        if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
            !filter_match_preds(call->filter, rec)) {
                __trace_event_discard_commit(buffer, event);
                return 1;
        }

        return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
        vfree(pid_list->pids);
        kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
        /*
         * If pid_max changed after filtered_pids was created, we
         * by default ignore all pids greater than the previous pid_max.
         */
        if (search_pid >= filtered_pids->pid_max)
                return false;

        return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
        /*
         * Return false, because if filtered_pids does not exist,
         * all pids are good to trace.
         */
        if (!filtered_pids)
                return false;

        return !trace_find_filtered_pid(filtered_pids, task->pid);
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
                                  struct task_struct *self,
                                  struct task_struct *task)
{
        if (!pid_list)
                return;

        /* For forks, we only add if the forking task is listed */
        if (self) {
                if (!trace_find_filtered_pid(pid_list, self->pid))
                        return;
        }

        /* Sorry, but we don't support pid_max changing after setting */
        if (task->pid >= pid_list->pid_max)
                return;

        /* "self" is set for forks, and NULL for exits */
        if (self)
                set_bit(task->pid, pid_list->pids);
        else
                clear_bit(task->pid, pid_list->pids);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
        unsigned long pid = (unsigned long)v;

        (*pos)++;

        /* pid already is +1 of the actual previous bit */
        pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

        /* Return pid + 1 to allow zero to be represented */
        if (pid < pid_list->pid_max)
                return (void *)(pid + 1);

        return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
        unsigned long pid;
        loff_t l = 0;

        pid = find_first_bit(pid_list->pids, pid_list->pid_max);
        if (pid >= pid_list->pid_max)
                return NULL;

        /* Return pid + 1 so that zero can be the exit value */
        for (pid++; pid && l < *pos;
             pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
                ;
        return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
        unsigned long pid = (unsigned long)v - 1;

        seq_printf(m, "%lu\n", pid);
        return 0;
}

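/*
 * For illustration, a minimal sketch of how a tracefs file could wire
 * the three helpers above into seq_file (hypothetical wrapper names;
 * real users add locking and pid_list lookup in their start/stop ops):
 *
 *      static const struct seq_operations example_pid_sops = {
 *              .start  = example_pid_start,    (calls trace_pid_start())
 *              .next   = example_pid_next,     (calls trace_pid_next())
 *              .stop   = example_pid_stop,
 *              .show   = trace_pid_show,
 *      };
 */
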
/* 128 should be much more than enough */
#define PID_BUF_SIZE            127

int trace_pid_write(struct trace_pid_list *filtered_pids,
                    struct trace_pid_list **new_pid_list,
                    const char __user *ubuf, size_t cnt)
{
        struct trace_pid_list *pid_list;
        struct trace_parser parser;
        unsigned long val;
        int nr_pids = 0;
        ssize_t read = 0;
        ssize_t ret = 0;
        loff_t pos;
        pid_t pid;

        if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
                return -ENOMEM;

        /*
         * Always recreate a new array. The write is an all or nothing
         * operation. Always create a new array when adding new pids by
         * the user. If the operation fails, then the current list is
         * not modified.
         */
        pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
        if (!pid_list)
                return -ENOMEM;

        pid_list->pid_max = READ_ONCE(pid_max);

        /* Only truncating will shrink pid_max */
        if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
                pid_list->pid_max = filtered_pids->pid_max;

        pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
        if (!pid_list->pids) {
                kfree(pid_list);
                return -ENOMEM;
        }

        if (filtered_pids) {
                /* copy the current bits to the new max */
                for_each_set_bit(pid, filtered_pids->pids,
                                 filtered_pids->pid_max) {
                        set_bit(pid, pid_list->pids);
                        nr_pids++;
                }
        }

        while (cnt > 0) {

                pos = 0;

                ret = trace_get_user(&parser, ubuf, cnt, &pos);
                if (ret < 0 || !trace_parser_loaded(&parser))
                        break;

                read += ret;
                ubuf += ret;
                cnt -= ret;

                parser.buffer[parser.idx] = 0;

                ret = -EINVAL;
                if (kstrtoul(parser.buffer, 0, &val))
                        break;
                if (val >= pid_list->pid_max)
                        break;

                pid = (pid_t)val;

                set_bit(pid, pid_list->pids);
                nr_pids++;

                trace_parser_clear(&parser);
                ret = 0;
        }
        trace_parser_put(&parser);

        if (ret < 0) {
                trace_free_pid_list(pid_list);
                return ret;
        }

        if (!nr_pids) {
                /* Cleared the list of pids */
                trace_free_pid_list(pid_list);
                read = ret;
                pid_list = NULL;
        }

        *new_pid_list = pid_list;

        return read;
}

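/*
 * For illustration (an assumption about the callers, which live outside
 * this excerpt): event pid filtering exposes this through files such as
 * tracing/set_event_pid, so a write like
 *
 *      echo 123 456 > /sys/kernel/debug/tracing/set_event_pid
 *
 * tokenizes each pid with trace_get_user() and sets bits 123 and 456 in
 * the freshly allocated bitmap; an empty write clears the list.
 */
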
static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
        u64 ts;

        /* Early boot up does not have a buffer yet */
        if (!buf->buffer)
                return trace_clock_local();

        ts = ring_buffer_time_stamp(buf->buffer, cpu);
        ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

        return ts;
}

u64 ftrace_now(int cpu)
{
        return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
        /*
         * For quick access (irqsoff uses this in fast path), just
         * return the mirror variable of the state of the ring buffer.
         * It's a little racy, but we don't really care.
         */
        smp_rmb();
        return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway, this is
 * configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer            *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                /* gain it for accessing the whole ring buffer. */
                down_write(&all_cpu_access_lock);
        } else {
                /* gain it for accessing a cpu ring buffer. */

                /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
                down_read(&all_cpu_access_lock);

                /* Secondly block other access to this @cpu ring buffer. */
                mutex_lock(&per_cpu(cpu_access_lock, cpu));
        }
}

static inline void trace_access_unlock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                up_write(&all_cpu_access_lock);
        } else {
                mutex_unlock(&per_cpu(cpu_access_lock, cpu));
                up_read(&all_cpu_access_lock);
        }
}

static inline void trace_access_lock_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
        (void)cpu;
        mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
        (void)cpu;
        mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

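/*
 * For illustration, the expected caller pattern for the primitives
 * above (a sketch; the actual readers live later in this file):
 *
 *      trace_access_lock(cpu);
 *      ... peek at or consume events of that cpu (or all cpus) ...
 *      trace_access_unlock(cpu);
 *
 * A single-cpu reader takes the rwsem shared plus that cpu's mutex, so
 * readers of the same cpu exclude each other while different cpus run
 * in parallel; a RING_BUFFER_ALL_CPUS reader takes the rwsem exclusively
 * and blocks them all.
 */
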
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
                                 unsigned long flags,
                                 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct ring_buffer *buffer,
                                      unsigned long flags,
                                      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
                                        unsigned long flags,
                                        int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct ring_buffer *buffer,
                                      unsigned long flags,
                                      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
                  int type, unsigned long flags, int pc)
{
        struct trace_entry *ent = ring_buffer_event_data(event);

        tracing_generic_entry_update(ent, flags, pc);
        ent->type = type;
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct ring_buffer *buffer,
                            int type,
                            unsigned long len,
                            unsigned long flags, int pc)
{
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(buffer, len);
        if (event != NULL)
                trace_event_setup(event, type, flags, pc);

        return event;
}

static void tracer_tracing_on(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                ring_buffer_record_on(tr->trace_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races of where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 0;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
        tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
        __this_cpu_write(trace_cmdline_save, true);

        /* If this is the temp buffer, we need to commit fully */
        if (this_cpu_read(trace_buffered_event) == event) {
                /* Length is in event->array[0] */
                ring_buffer_write(buffer, event->array[0], &event->array[1]);
                /* Release the temp buffer */
                this_cpu_dec(trace_buffered_event_cnt);
        } else
                ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:    The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct print_entry *entry;
        unsigned long irq_flags;
        int alloc;
        int pc;

        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
                return 0;

        pc = preempt_count();

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        alloc = sizeof(*entry) + size + 2; /* possible \n added */

        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
                                            irq_flags, pc);
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->ip = ip;

        memcpy(&entry->buf, str, size);

        /* Add a newline if necessary */
        if (entry->buf[size - 1] != '\n') {
                entry->buf[size] = '\n';
                entry->buf[size + 1] = '\0';
        } else
                entry->buf[size] = '\0';

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

        return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

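/*
 * For illustration: callers normally reach __trace_puts() through the
 * trace_puts() macro (defined outside this file), which supplies
 * _THIS_IP_ and the length of the text, e.g.:
 *
 *      trace_puts("reached the slow path\n");
 *
 * Constant strings may instead be routed to __trace_bputs() below,
 * which records only the string's address rather than copying the text.
 */
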
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:    The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct bputs_entry *entry;
        unsigned long irq_flags;
        int size = sizeof(struct bputs_entry);
        int pc;

        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
                return 0;

        pc = preempt_count();

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
                                            irq_flags, pc);
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->ip  = ip;
        entry->str = str;

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

        return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
        struct trace_array *tr = &global_trace;
        struct tracer *tracer = tr->current_trace;
        unsigned long flags;

        if (in_nmi()) {
                internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
                internal_trace_puts("*** snapshot is being ignored        ***\n");
                return;
        }

        if (!tr->allocated_snapshot) {
                internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
                internal_trace_puts("*** stopping trace here!   ***\n");
                tracing_off();
                return;
        }

        /* Note, snapshot can not be used when the tracer uses it */
        if (tracer->use_max_tr) {
                internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
                internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
                return;
        }

        local_irq_save(flags);
        update_max_tr(tr, current, smp_processor_id());
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
                                        struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
        int ret;

        if (!tr->allocated_snapshot) {

                /* allocate spare buffer */
                ret = resize_buffer_duplicate_size(&tr->max_buffer,
                                   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
                if (ret < 0)
                        return ret;

                tr->allocated_snapshot = true;
        }

        return 0;
}

static void free_snapshot(struct trace_array *tr)
{
        /*
         * We don't free the ring buffer. Instead, we resize it because
         * the max_tr ring buffer has some state (e.g. ring->clock) and
         * we want to preserve it.
         */
        ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
        set_buffer_entries(&tr->max_buffer, 1);
        tracing_reset_online_cpus(&tr->max_buffer);
        tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
        struct trace_array *tr = &global_trace;
        int ret;

        ret = alloc_snapshot(tr);
        WARN_ON(ret < 0);

        return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
        int ret;

        ret = tracing_alloc_snapshot();
        if (ret < 0)
                return;

        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
        return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
        /* Give warning */
        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

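/*
 * For illustration, a hedged sketch of how other kernel code could use
 * the snapshot API above (hypothetical condition; assumes it runs in a
 * context where the allocation may sleep):
 *
 *      if (tracing_alloc_snapshot() == 0 && hit_rare_condition())
 *              tracing_snapshot();
 *
 * The captured trace can then be read back from
 * /sys/kernel/debug/tracing/snapshot.
 */
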
static void tracer_tracing_off(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                ring_buffer_record_off(tr->trace_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races of where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 1;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
        tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
        if (__disable_trace_on_warning)
                tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
int tracer_tracing_is_on(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                return ring_buffer_record_is_on(tr->trace_buffer.buffer);
        return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
        return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
        unsigned long buf_size;

        if (!str)
                return 0;
        buf_size = memparse(str, &str);
        /* nr_entries can not be zero */
        if (buf_size == 0)
                return 0;
        trace_buf_size = buf_size;
        return 1;
}
__setup("trace_buf_size=", set_buf_size);

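/*
 * For illustration: since memparse() accepts size suffixes, booting with
 * "trace_buf_size=1441792" or, for example, "trace_buf_size=1408k" sets
 * the buffer size in bytes; the run-time equivalent is the
 * tracing/buffer_size_kb file.
 */
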
static int __init set_tracing_thresh(char *str)
{
        unsigned long threshold;
        int ret;

        if (!str)
                return 0;
        ret = kstrtoul(str, 0, &threshold);
        if (ret < 0)
                return 0;
        tracing_thresh = threshold * 1000;
        return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
        TRACE_FLAGS
        NULL
};

static struct {
        u64 (*func)(void);
        const char *name;
        int in_ns;              /* is this clock in nanoseconds? */
} trace_clocks[] = {
        { trace_clock_local,            "local",        1 },
        { trace_clock_global,           "global",       1 },
        { trace_clock_counter,          "counter",      0 },
        { trace_clock_jiffies,          "uptime",       0 },
        { trace_clock,                  "perf",         1 },
        { ktime_get_mono_fast_ns,       "mono",         1 },
        { ktime_get_raw_fast_ns,        "mono_raw",     1 },
        { ktime_get_boot_fast_ns,       "boot",         1 },
        ARCH_TRACE_CLOCKS
};

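/*
 * For illustration: the names above are what the tracing/trace_clock
 * file accepts, so e.g.
 *
 *      echo mono > /sys/kernel/debug/tracing/trace_clock
 *
 * switches timestamps to the fast monotonic clock. Clocks with
 * in_ns == 0 ("counter", "uptime") do not report nanoseconds, so their
 * values are shown raw rather than converted for display.
 */
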
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
        memset(parser, 0, sizeof(*parser));

        parser->buffer = kmalloc(size, GFP_KERNEL);
        if (!parser->buffer)
                return 1;

        parser->size = size;
        return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
        kfree(parser->buffer);
        parser->buffer = NULL;
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
        size_t cnt, loff_t *ppos)
{
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!*ppos)
                trace_parser_clear(parser);

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;

        read++;
        cnt--;

        /*
         * The parser is not finished with the last write,
         * continue reading the user input without skipping spaces.
         */
        if (!parser->cont) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                /* only spaces were written */
                if (isspace(ch)) {
                        *ppos += read;
                        ret = read;
                        goto out;
                }

                parser->idx = 0;
        }

        /* read the non-space input */
        while (cnt && !isspace(ch)) {
                if (parser->idx < parser->size - 1)
                        parser->buffer[parser->idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        /* We either got finished input or we have to wait for another call. */
        if (isspace(ch)) {
                parser->buffer[parser->idx] = 0;
                parser->cont = false;
        } else if (parser->idx < parser->size - 1) {
                parser->cont = true;
                parser->buffer[parser->idx++] = ch;
        } else {
                ret = -EINVAL;
                goto out;
        }

        *ppos += read;
        ret = read;

out:
        return ret;
}

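/*
 * For illustration, how a write of "123 456" is tokenized (a walk-through
 * of the code above, not an extra code path): the first call skips any
 * leading spaces, copies "123" into parser->buffer, stops at the space
 * and returns 4 bytes read with parser->cont == false; the caller
 * NUL-terminates and consumes the token, then calls again for "456".
 * If the buffer fills before a space is seen, -EINVAL is returned.
 */
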
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
        int len;

        if (trace_seq_used(s) <= s->seq.readpos)
                return -EBUSY;

        len = trace_seq_used(s) - s->seq.readpos;
        if (cnt > len)
                cnt = len;
        memcpy(buf, s->buffer + s->seq.readpos, cnt);

        s->seq.readpos += cnt;
        return cnt;
}

unsigned long __read_mostly     tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_buffer *trace_buf = &tr->trace_buffer;
        struct trace_buffer *max_buf = &tr->max_buffer;
        struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
        struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

        max_buf->cpu = cpu;
        max_buf->time_start = data->preempt_timestamp;

        max_data->saved_latency = tr->max_latency;
        max_data->critical_start = data->critical_start;
        max_data->critical_end = data->critical_end;

        memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
        /*
         * If tsk == current, then use current_uid(), as that does not use
         * RCU. The irq tracer can be called out of RCU scope.
         */
        if (tsk == current)
                max_data->uid = current_uid();
        else
                max_data->uid = task_uid(tsk);

        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        max_data->policy = tsk->policy;
        max_data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct ring_buffer *buf;

        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());

        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&tr->max_lock);

        buf = tr->trace_buffer.buffer;
        tr->trace_buffer.buffer = tr->max_buffer.buffer;
        tr->max_buffer.buffer = buf;

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&tr->max_lock);
}

1371/**
1372 * update_max_tr_single - only copy one trace over, and reset the rest
1373 * @tr - tracer
1374 * @tsk - task with the latency
1375 * @cpu - the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001376 *
1377 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001378 */
Ingo Molnare309b412008-05-12 21:20:51 +02001379void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001380update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1381{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001382 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001383
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001384 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001385 return;
1386
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001387 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001388 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001389 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001390 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001391 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001392 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001393
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001394 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001395
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001396 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001397
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001398 if (ret == -EBUSY) {
1399 /*
1400 * We failed to swap the buffer due to a commit taking
1401 * place on this CPU. We fail to record, but we reset
1402 * the max trace buffer (no one writes directly to it)
1403 * and flag that it failed.
1404 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001405 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001406 "Failed to swap buffers due to commit in progress\n");
1407 }
1408
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001409 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001410
1411 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001412 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001413}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001414#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001415
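/*
 * A minimal sketch (hypothetical latency tracer, not code from this
 * file) of how the two helpers above are meant to be driven: the
 * caller runs with irqs disabled and has already done its own
 * stop/disable checks, as the wakeup and irqsoff tracers do. The
 * max_latency field is assumed to be the trace_array's latency
 * high-water mark.
 */
#ifdef CONFIG_TRACER_MAX_TRACE
static void example_probe_new_max(struct trace_array *tr,
				  struct task_struct *tsk,
				  int cpu, unsigned long latency)
{
	if (latency <= tr->max_latency)
		return;

	tr->max_latency = latency;

	/* Swap all buffers with the snapshot (max) buffers ... */
	update_max_tr(tr, tsk, cpu);
	/* ... or swap just this CPU: update_max_tr_single(tr, tsk, cpu); */
}
#endif
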
Rabin Vincente30f53a2014-11-10 19:46:34 +01001416static int wait_on_pipe(struct trace_iterator *iter, bool full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001417{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001418 /* Iterators are static, they should be filled or empty */
1419 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001420 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001421
Rabin Vincente30f53a2014-11-10 19:46:34 +01001422 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1423 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001424}
1425
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001426#ifdef CONFIG_FTRACE_STARTUP_TEST
1427static int run_tracer_selftest(struct tracer *type)
1428{
1429 struct trace_array *tr = &global_trace;
1430 struct tracer *saved_tracer = tr->current_trace;
1431 int ret;
1432
1433 if (!type->selftest || tracing_selftest_disabled)
1434 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001435
1436 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001437 * Run a selftest on this tracer.
1438 * Here we reset the trace buffer, and set the current
1439 * tracer to be this tracer. The tracer can then run some
1440 * internal tracing to verify that everything is in order.
1441 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001442 */
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001443 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001444
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001445 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001446
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001447#ifdef CONFIG_TRACER_MAX_TRACE
1448 if (type->use_max_tr) {
1449 /* If we expanded the buffers, make sure the max is expanded too */
1450 if (ring_buffer_expanded)
1451 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1452 RING_BUFFER_ALL_CPUS);
1453 tr->allocated_snapshot = true;
1454 }
1455#endif
1456
1457 /* the test is responsible for initializing and enabling */
1458 pr_info("Testing tracer %s: ", type->name);
1459 ret = type->selftest(type, tr);
1460 /* the test is responsible for resetting too */
1461 tr->current_trace = saved_tracer;
1462 if (ret) {
1463 printk(KERN_CONT "FAILED!\n");
1464 /* Add the warning after printing 'FAILED' */
1465 WARN_ON(1);
1466 return -1;
1467 }
1468 /* Only reset on passing, to avoid touching corrupted buffers */
1469 tracing_reset_online_cpus(&tr->trace_buffer);
1470
1471#ifdef CONFIG_TRACER_MAX_TRACE
1472 if (type->use_max_tr) {
1473 tr->allocated_snapshot = false;
1474
1475 /* Shrink the max buffer again */
1476 if (ring_buffer_expanded)
1477 ring_buffer_resize(tr->max_buffer.buffer, 1,
1478 RING_BUFFER_ALL_CPUS);
1479 }
1480#endif
1481
1482 printk(KERN_CONT "PASSED\n");
1483 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001484}
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001485#else
1486static inline int run_tracer_selftest(struct tracer *type)
1487{
1488 return 0;
1489}
1490#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001491
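/*
 * Shape of a ->selftest() callback consumed by run_tracer_selftest()
 * above (a sketch; the real ones live in trace_selftest.c). Per the
 * comments above, the test initializes and enables the tracer itself
 * and resets it again before returning; 0 means the tracer passed.
 */
#ifdef CONFIG_FTRACE_STARTUP_TEST
static int example_tracer_selftest(struct tracer *trace,
				   struct trace_array *tr)
{
	int ret;

	ret = trace->init(tr);		/* enable the tracer under test */
	if (ret)
		return ret;

	/* ... generate some events and sanity-check the buffer ... */

	trace->reset(tr);		/* the test must reset, see above */
	return 0;
}
#endif
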
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001492static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1493
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001494static void __init apply_trace_boot_options(void);
1495
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001496/**
1497 * register_tracer - register a tracer with the ftrace system.
1498 * @type: the plugin for the tracer
1499 *
1500 * Register a new plugin tracer.
1501 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001502int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001503{
1504 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001505 int ret = 0;
1506
1507 if (!type->name) {
1508 pr_info("Tracer must have a name\n");
1509 return -1;
1510 }
1511
Dan Carpenter24a461d2010-07-10 12:06:44 +02001512 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08001513 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1514 return -1;
1515 }
1516
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001517 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01001518
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001519 tracing_selftest_running = true;
1520
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001521 for (t = trace_types; t; t = t->next) {
1522 if (strcmp(type->name, t->name) == 0) {
1523 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08001524 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001525 type->name);
1526 ret = -1;
1527 goto out;
1528 }
1529 }
1530
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001531 if (!type->set_flag)
1532 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08001533 if (!type->flags) {
1534 /* allocate a dummy tracer_flags */
1535 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08001536 if (!type->flags) {
1537 ret = -ENOMEM;
1538 goto out;
1539 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08001540 type->flags->val = 0;
1541 type->flags->opts = dummy_tracer_opt;
1542 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001543 if (!type->flags->opts)
1544 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01001545
Chunyu Hud39cdd22016-03-08 21:37:01 +08001546 /* store the tracer for __set_tracer_option */
1547 type->flags->trace = type;
1548
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001549 ret = run_tracer_selftest(type);
1550 if (ret < 0)
1551 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02001552
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001553 type->next = trace_types;
1554 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001555 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02001556
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001557 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001558 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001559 mutex_unlock(&trace_types_lock);
1560
Steven Rostedtdac74942009-02-05 01:13:38 -05001561 if (ret || !default_bootup_tracer)
1562 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05001563
Li Zefanee6c2c12009-09-18 14:06:47 +08001564 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05001565 goto out_unlock;
1566
1567 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1568 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05001569 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05001570 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001571
1572 apply_trace_boot_options();
1573
Steven Rostedtdac74942009-02-05 01:13:38 -05001574 /* disable other selftests, since this will break it. */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05001575 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05001576#ifdef CONFIG_FTRACE_STARTUP_TEST
1577 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1578 type->name);
1579#endif
1580
1581 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001582 return ret;
1583}
1584
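/*
 * A minimal built-in tracer registration (illustrative sketch; see
 * trace_nop.c for a real example). Since register_tracer() is __init,
 * this only works for tracers compiled into the kernel, not modules.
 */
static int example_tracer_init(struct trace_array *tr)
{
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static __init int example_tracer_register(void)
{
	return register_tracer(&example_tracer);
}
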
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001585void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001586{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001587 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001588
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001589 if (!buffer)
1590 return;
1591
Steven Rostedtf6339032009-09-04 12:35:16 -04001592 ring_buffer_record_disable(buffer);
1593
1594 /* Make sure all commits have finished */
1595 synchronize_sched();
Steven Rostedt68179682012-05-08 20:57:53 -04001596 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001597
1598 ring_buffer_record_enable(buffer);
1599}
1600
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001601void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001602{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001603 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001604 int cpu;
1605
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001606 if (!buffer)
1607 return;
1608
Steven Rostedt621968c2009-09-04 12:02:35 -04001609 ring_buffer_record_disable(buffer);
1610
1611 /* Make sure all commits have finished */
1612 synchronize_sched();
1613
Alexander Z Lam94571582013-08-02 18:36:16 -07001614 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001615
1616 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001617 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001618
1619 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001620}
1621
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001622/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001623void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001624{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001625 struct trace_array *tr;
1626
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001627 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001628 tracing_reset_online_cpus(&tr->trace_buffer);
1629#ifdef CONFIG_TRACER_MAX_TRACE
1630 tracing_reset_online_cpus(&tr->max_buffer);
1631#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001632 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001633}
1634
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001635#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001636#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001637static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001638struct saved_cmdlines_buffer {
1639 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1640 unsigned *map_cmdline_to_pid;
1641 unsigned cmdline_num;
1642 int cmdline_idx;
1643 char *saved_cmdlines;
1644};
1645static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001646
Steven Rostedt25b0b442008-05-12 21:21:00 +02001647/* temporary disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001648static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001649
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001650static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001651{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001652 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1653}
1654
1655static inline void set_cmdline(int idx, const char *cmdline)
1656{
1657 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1658}
1659
1660static int allocate_cmdlines_buffer(unsigned int val,
1661 struct saved_cmdlines_buffer *s)
1662{
1663 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1664 GFP_KERNEL);
1665 if (!s->map_cmdline_to_pid)
1666 return -ENOMEM;
1667
1668 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1669 if (!s->saved_cmdlines) {
1670 kfree(s->map_cmdline_to_pid);
1671 return -ENOMEM;
1672 }
1673
1674 s->cmdline_idx = 0;
1675 s->cmdline_num = val;
1676 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1677 sizeof(s->map_pid_to_cmdline));
1678 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1679 val * sizeof(*s->map_cmdline_to_pid));
1680
1681 return 0;
1682}
1683
1684static int trace_create_savedcmd(void)
1685{
1686 int ret;
1687
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001688 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001689 if (!savedcmd)
1690 return -ENOMEM;
1691
1692 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1693 if (ret < 0) {
1694 kfree(savedcmd);
1695 savedcmd = NULL;
1696 return -ENOMEM;
1697 }
1698
1699 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001700}
1701
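/*
 * Resizing follows the same allocate-then-swap pattern (a sketch of
 * what a saved_cmdlines_size write would do; example_resize_savedcmd
 * is hypothetical). The old buffer is swapped out under
 * trace_cmdline_lock and freed only after the lock is dropped.
 */
static int example_resize_savedcmd(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);

	kfree(savedcmd_temp->saved_cmdlines);
	kfree(savedcmd_temp->map_cmdline_to_pid);
	kfree(savedcmd_temp);

	return 0;
}
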
Carsten Emdeb5130b12009-09-13 01:43:07 +02001702int is_tracing_stopped(void)
1703{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001704 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001705}
1706
Steven Rostedt0f048702008-11-05 16:05:44 -05001707/**
1708 * tracing_start - quick start of the tracer
1709 *
1710 * If tracing is enabled but was stopped by tracing_stop,
1711 * this will start the tracer back up.
1712 */
1713void tracing_start(void)
1714{
1715 struct ring_buffer *buffer;
1716 unsigned long flags;
1717
1718 if (tracing_disabled)
1719 return;
1720
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001721 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1722 if (--global_trace.stop_count) {
1723 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001724 /* Someone screwed up their debugging */
1725 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001726 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001727 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001728 goto out;
1729 }
1730
Steven Rostedta2f80712010-03-12 19:56:00 -05001731 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001732 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001733
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001734 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001735 if (buffer)
1736 ring_buffer_record_enable(buffer);
1737
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001738#ifdef CONFIG_TRACER_MAX_TRACE
1739 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001740 if (buffer)
1741 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001742#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001743
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001744 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001745
Steven Rostedt0f048702008-11-05 16:05:44 -05001746 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001747 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1748}
1749
1750static void tracing_start_tr(struct trace_array *tr)
1751{
1752 struct ring_buffer *buffer;
1753 unsigned long flags;
1754
1755 if (tracing_disabled)
1756 return;
1757
1758 /* If global, we need to also start the max tracer */
1759 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1760 return tracing_start();
1761
1762 raw_spin_lock_irqsave(&tr->start_lock, flags);
1763
1764 if (--tr->stop_count) {
1765 if (tr->stop_count < 0) {
1766 /* Someone screwed up their debugging */
1767 WARN_ON_ONCE(1);
1768 tr->stop_count = 0;
1769 }
1770 goto out;
1771 }
1772
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001773 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001774 if (buffer)
1775 ring_buffer_record_enable(buffer);
1776
1777 out:
1778 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001779}
1780
1781/**
1782 * tracing_stop - quick stop of the tracer
1783 *
1784 * Light weight way to stop tracing. Use in conjunction with
1785 * tracing_start.
1786 */
1787void tracing_stop(void)
1788{
1789 struct ring_buffer *buffer;
1790 unsigned long flags;
1791
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001792 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1793 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001794 goto out;
1795
Steven Rostedta2f80712010-03-12 19:56:00 -05001796 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001797 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001798
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001799 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001800 if (buffer)
1801 ring_buffer_record_disable(buffer);
1802
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001803#ifdef CONFIG_TRACER_MAX_TRACE
1804 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001805 if (buffer)
1806 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001807#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001808
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001809 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001810
Steven Rostedt0f048702008-11-05 16:05:44 -05001811 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001812 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1813}
1814
1815static void tracing_stop_tr(struct trace_array *tr)
1816{
1817 struct ring_buffer *buffer;
1818 unsigned long flags;
1819
1820 /* If global, we need to also stop the max tracer */
1821 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1822 return tracing_stop();
1823
1824 raw_spin_lock_irqsave(&tr->start_lock, flags);
1825 if (tr->stop_count++)
1826 goto out;
1827
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001828 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001829 if (buffer)
1830 ring_buffer_record_disable(buffer);
1831
1832 out:
1833 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001834}
1835
Ingo Molnare309b412008-05-12 21:20:51 +02001836void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001837
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001838static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001839{
Carsten Emdea635cf02009-03-18 09:00:41 +01001840 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001841
1842 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001843 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001844
1845 /*
1846 * It's not the end of the world if we don't get
1847 * the lock, but we also don't want to spin
1848 * nor do we want to disable interrupts,
1849 * so if we miss here, then better luck next time.
1850 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001851 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001852 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001853
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001854 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001855 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001856 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001857
Carsten Emdea635cf02009-03-18 09:00:41 +01001858 /*
1859 * Check whether the cmdline buffer at idx has a pid
1860 * mapped. We are going to overwrite that entry so we
1861 * need to clear the map_pid_to_cmdline. Otherwise we
1862 * would read the new comm for the old pid.
1863 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001864 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001865 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001866 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001867
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001868 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1869 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001870
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001871 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001872 }
1873
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001874 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001875
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001876 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001877
1878 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001879}
1880
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001881static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001882{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001883 unsigned map;
1884
Steven Rostedt4ca530852009-03-16 19:20:15 -04001885 if (!pid) {
1886 strcpy(comm, "<idle>");
1887 return;
1888 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001889
Steven Rostedt74bf4072010-01-25 15:11:53 -05001890 if (WARN_ON_ONCE(pid < 0)) {
1891 strcpy(comm, "<XXX>");
1892 return;
1893 }
1894
Steven Rostedt4ca530852009-03-16 19:20:15 -04001895 if (pid > PID_MAX_DEFAULT) {
1896 strcpy(comm, "<...>");
1897 return;
1898 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001899
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001900 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001901 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001902 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001903 else
1904 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001905}
1906
1907void trace_find_cmdline(int pid, char comm[])
1908{
1909 preempt_disable();
1910 arch_spin_lock(&trace_cmdline_lock);
1911
1912 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001913
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001914 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001915 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001916}
1917
Ingo Molnare309b412008-05-12 21:20:51 +02001918void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001919{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001920 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001921 return;
1922
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001923 if (!__this_cpu_read(trace_cmdline_save))
1924 return;
1925
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001926 if (trace_save_cmdline(tsk))
1927 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001928}
1929
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001930void
Steven Rostedt38697052008-10-01 13:14:09 -04001931tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1932 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001933{
1934 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001935
Steven Rostedt777e2082008-09-29 23:02:42 -04001936 entry->preempt_count = pc & 0xff;
1937 entry->pid = (tsk) ? tsk->pid : 0;
1938 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001939#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001940 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001941#else
1942 TRACE_FLAG_IRQS_NOSUPPORT |
1943#endif
Peter Zijlstra7e6867b2016-03-18 16:28:04 +01001944 ((pc & NMI_MASK) ? TRACE_FLAG_NMI : 0) |
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001945 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
Pavankumar Kondetic59f29c2016-12-09 21:50:17 +05301946 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001947 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1948 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001949}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001950EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001951
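/*
 * Sketch of reading the flags word back (the authoritative decoding
 * lives in trace_output.c; the latency-format characters below are
 * illustrative but follow the usual convention).
 */
static char example_irqs_off_char(struct trace_entry *entry)
{
	if (entry->flags & TRACE_FLAG_IRQS_OFF)
		return 'd';	/* interrupts disabled */
	if (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT)
		return 'X';	/* arch cannot report irq state */
	return '.';		/* interrupts enabled */
}
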
Steven Rostedte77405a2009-09-02 14:17:06 -04001952struct ring_buffer_event *
1953trace_buffer_lock_reserve(struct ring_buffer *buffer,
1954 int type,
1955 unsigned long len,
1956 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001957{
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05001958 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001959}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001960
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04001961DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1962DEFINE_PER_CPU(int, trace_buffered_event_cnt);
1963static int trace_buffered_event_ref;
1964
1965/**
1966 * trace_buffered_event_enable - enable buffering events
1967 *
1968 * When events are being filtered, it is quicker to use a temporary
1969 * buffer to write the event data into if there's a likely chance
1970 * that it will not be committed. The discard of the ring buffer
1971 * is not as fast as committing, and is much slower than copying
1972 * a commit.
1973 *
1974 * When an event is to be filtered, allocate per cpu buffers to
1975 * write the event data into, and if the event is filtered and discarded
1976 * it is simply dropped, otherwise, the entire data is to be committed
1977 * in one shot.
1978 */
1979void trace_buffered_event_enable(void)
1980{
1981 struct ring_buffer_event *event;
1982 struct page *page;
1983 int cpu;
1984
1985 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
1986
1987 if (trace_buffered_event_ref++)
1988 return;
1989
1990 for_each_tracing_cpu(cpu) {
1991 page = alloc_pages_node(cpu_to_node(cpu),
1992 GFP_KERNEL | __GFP_NORETRY, 0);
1993 if (!page)
1994 goto failed;
1995
1996 event = page_address(page);
1997 memset(event, 0, sizeof(*event));
1998
1999 per_cpu(trace_buffered_event, cpu) = event;
2000
2001 preempt_disable();
2002 if (cpu == smp_processor_id() &&
2003 this_cpu_read(trace_buffered_event) !=
2004 per_cpu(trace_buffered_event, cpu))
2005 WARN_ON_ONCE(1);
2006 preempt_enable();
2007 }
2008
2009 return;
2010 failed:
2011 trace_buffered_event_disable();
2012}
2013
2014static void enable_trace_buffered_event(void *data)
2015{
2016 /* Probably not needed, but do it anyway */
2017 smp_rmb();
2018 this_cpu_dec(trace_buffered_event_cnt);
2019}
2020
2021static void disable_trace_buffered_event(void *data)
2022{
2023 this_cpu_inc(trace_buffered_event_cnt);
2024}
2025
2026/**
2027 * trace_buffered_event_disable - disable buffering events
2028 *
2029 * When a filter is removed, it is faster to not use the buffered
2030 * events, and to commit directly into the ring buffer. Free up
2031 * the temp buffers when there are no more users. This requires
2032 * special synchronization with current events.
2033 */
2034void trace_buffered_event_disable(void)
2035{
2036 int cpu;
2037
2038 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2039
2040 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2041 return;
2042
2043 if (--trace_buffered_event_ref)
2044 return;
2045
2046 preempt_disable();
2047 /* For each CPU, set the buffer as used. */
2048 smp_call_function_many(tracing_buffer_mask,
2049 disable_trace_buffered_event, NULL, 1);
2050 preempt_enable();
2051
2052 /* Wait for all current users to finish */
2053 synchronize_sched();
2054
2055 for_each_tracing_cpu(cpu) {
2056 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2057 per_cpu(trace_buffered_event, cpu) = NULL;
2058 }
2059 /*
2060 * Make sure trace_buffered_event is NULL before clearing
2061 * trace_buffered_event_cnt.
2062 */
2063 smp_wmb();
2064
2065 preempt_disable();
2066 /* Do the work on each cpu */
2067 smp_call_function_many(tracing_buffer_mask,
2068 enable_trace_buffered_event, NULL, 1);
2069 preempt_enable();
2070}
2071
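/*
 * Sketch of the intended pairing (hypothetical callers): a filter
 * attach/detach path brackets its lifetime with these refcounted
 * helpers, always under event_mutex as the WARN_ON_ONCE()s above
 * require.
 */
static void example_filter_attach(void)
{
	mutex_lock(&event_mutex);
	trace_buffered_event_enable();	/* first user allocates the pages */
	mutex_unlock(&event_mutex);
}

static void example_filter_detach(void)
{
	mutex_lock(&event_mutex);
	trace_buffered_event_disable();	/* last user frees them */
	mutex_unlock(&event_mutex);
}
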
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002072static struct ring_buffer *temp_buffer;
2073
Steven Rostedtef5580d2009-02-27 19:38:04 -05002074struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04002075trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002076 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002077 int type, unsigned long len,
2078 unsigned long flags, int pc)
2079{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002080 struct ring_buffer_event *entry;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002081 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002082
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002083 *current_rb = trace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002084
2085 if ((trace_file->flags &
2086 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2087 (entry = this_cpu_read(trace_buffered_event))) {
2088 /* Try to use the per cpu buffer first */
2089 val = this_cpu_inc_return(trace_buffered_event_cnt);
2090 if (val == 1) {
2091 trace_event_setup(entry, type, flags, pc);
2092 entry->array[0] = len;
2093 return entry;
2094 }
2095 this_cpu_dec(trace_buffered_event_cnt);
2096 }
2097
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002098 entry = __trace_buffer_lock_reserve(*current_rb,
2099 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002100 /*
2101 * If tracing is off, but we have triggers enabled
2102 * we still need to look at the event data. Use the temp_buffer
2103 * to store the trace event for the trigger to use. It's recursion
2104 * safe and will not be recorded anywhere.
2105 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002106 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002107 *current_rb = temp_buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002108 entry = __trace_buffer_lock_reserve(*current_rb,
2109 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002110 }
2111 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002112}
2113EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2114
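/*
 * Sketch of the reserve/fill/commit cycle the generated event probes
 * perform around the helper above (names of the per-event fields are
 * illustrative; the commit side is trace_event_buffer_commit() below).
 */
static void example_emit_event(struct trace_event_file *trace_file,
			       int type, unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_entry *entry;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						type, sizeof(*entry),
						flags, pc);
	if (!event)
		return;		/* tracing off, or the buffer is full */

	entry = ring_buffer_event_data(event);
	/* ... fill the event-specific fields after the common header ... */

	event_trigger_unlock_commit(trace_file, buffer, event, entry,
				    flags, pc);
}
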
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002115static DEFINE_SPINLOCK(tracepoint_iter_lock);
2116static DEFINE_MUTEX(tracepoint_printk_mutex);
2117
2118static void output_printk(struct trace_event_buffer *fbuffer)
2119{
2120 struct trace_event_call *event_call;
2121 struct trace_event *event;
2122 unsigned long flags;
2123 struct trace_iterator *iter = tracepoint_print_iter;
2124
2125 /* We should never get here if iter is NULL */
2126 if (WARN_ON_ONCE(!iter))
2127 return;
2128
2129 event_call = fbuffer->trace_file->event_call;
2130 if (!event_call || !event_call->event.funcs ||
2131 !event_call->event.funcs->trace)
2132 return;
2133
2134 event = &fbuffer->trace_file->event_call->event;
2135
2136 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2137 trace_seq_init(&iter->seq);
2138 iter->ent = fbuffer->entry;
2139 event_call->event.funcs->trace(iter, 0, event);
2140 trace_seq_putc(&iter->seq, 0);
2141 printk("%s", iter->seq.buffer);
2142
2143 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2144}
2145
2146int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2147 void __user *buffer, size_t *lenp,
2148 loff_t *ppos)
2149{
2150 int save_tracepoint_printk;
2151 int ret;
2152
2153 mutex_lock(&tracepoint_printk_mutex);
2154 save_tracepoint_printk = tracepoint_printk;
2155
2156 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2157
2158 /*
2159 * This will force exiting early, as tracepoint_printk
2160 * is always zero when tracepoint_print_iter is not allocated
2161 */
2162 if (!tracepoint_print_iter)
2163 tracepoint_printk = 0;
2164
2165 if (save_tracepoint_printk == tracepoint_printk)
2166 goto out;
2167
2168 if (tracepoint_printk)
2169 static_key_enable(&tracepoint_printk_key.key);
2170 else
2171 static_key_disable(&tracepoint_printk_key.key);
2172
2173 out:
2174 mutex_unlock(&tracepoint_printk_mutex);
2175
2176 return ret;
2177}
2178
2179void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2180{
2181 if (static_key_false(&tracepoint_printk_key.key))
2182 output_printk(fbuffer);
2183
2184 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2185 fbuffer->event, fbuffer->entry,
2186 fbuffer->flags, fbuffer->pc);
2187}
2188EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2189
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002190void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2191 struct ring_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002192 struct ring_buffer_event *event,
2193 unsigned long flags, int pc,
2194 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002195{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002196 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002197
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002198 /*
2199 * If regs is not set, then skip the following callers:
2200 * trace_buffer_unlock_commit_regs
2201 * event_trigger_unlock_commit
2202 * trace_event_buffer_commit
2203 * trace_event_raw_event_sched_switch
2204 * Note, we can still get here via blktrace, wakeup tracer
2205 * and mmiotrace, but that's ok if they lose a function or
2206 * two. They are not that meaningful.
2207 */
2208 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002209 ftrace_trace_userstack(buffer, flags, pc);
2210}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002211
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002212/*
2213 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2214 */
2215void
2216trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2217 struct ring_buffer_event *event)
2218{
2219 __buffer_unlock_commit(buffer, event);
2220}
2221
Chunyan Zhang478409d2016-11-21 15:57:18 +08002222static void
2223trace_process_export(struct trace_export *export,
2224 struct ring_buffer_event *event)
2225{
2226 struct trace_entry *entry;
2227 unsigned int size = 0;
2228
2229 entry = ring_buffer_event_data(event);
2230 size = ring_buffer_event_length(event);
2231 export->write(entry, size);
2232}
2233
2234static DEFINE_MUTEX(ftrace_export_lock);
2235
2236static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2237
2238static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2239
2240static inline void ftrace_exports_enable(void)
2241{
2242 static_branch_enable(&ftrace_exports_enabled);
2243}
2244
2245static inline void ftrace_exports_disable(void)
2246{
2247 static_branch_disable(&ftrace_exports_enabled);
2248}
2249
2250void ftrace_exports(struct ring_buffer_event *event)
2251{
2252 struct trace_export *export;
2253
2254 preempt_disable_notrace();
2255
2256 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2257 while (export) {
2258 trace_process_export(export, event);
2259 export = rcu_dereference_raw_notrace(export->next);
2260 }
2261
2262 preempt_enable_notrace();
2263}
2264
2265static inline void
2266add_trace_export(struct trace_export **list, struct trace_export *export)
2267{
2268 rcu_assign_pointer(export->next, *list);
2269 /*
2270 * We are entering export into the list but another
2271 * CPU might be walking that list. We need to make sure
2272 * the export->next pointer is valid before another CPU sees
2273 * the export pointer included into the list.
2274 */
2275 rcu_assign_pointer(*list, export);
2276}
2277
2278static inline int
2279rm_trace_export(struct trace_export **list, struct trace_export *export)
2280{
2281 struct trace_export **p;
2282
2283 for (p = list; *p != NULL; p = &(*p)->next)
2284 if (*p == export)
2285 break;
2286
2287 if (*p != export)
2288 return -1;
2289
2290 rcu_assign_pointer(*p, (*p)->next);
2291
2292 return 0;
2293}
2294
2295static inline void
2296add_ftrace_export(struct trace_export **list, struct trace_export *export)
2297{
2298 if (*list == NULL)
2299 ftrace_exports_enable();
2300
2301 add_trace_export(list, export);
2302}
2303
2304static inline int
2305rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2306{
2307 int ret;
2308
2309 ret = rm_trace_export(list, export);
2310 if (*list == NULL)
2311 ftrace_exports_disable();
2312
2313 return ret;
2314}
2315
2316int register_ftrace_export(struct trace_export *export)
2317{
2318 if (WARN_ON_ONCE(!export->write))
2319 return -1;
2320
2321 mutex_lock(&ftrace_export_lock);
2322
2323 add_ftrace_export(&ftrace_exports_list, export);
2324
2325 mutex_unlock(&ftrace_export_lock);
2326
2327 return 0;
2328}
2329EXPORT_SYMBOL_GPL(register_ftrace_export);
2330
2331int unregister_ftrace_export(struct trace_export *export)
2332{
2333 int ret;
2334
2335 mutex_lock(&ftrace_export_lock);
2336
2337 ret = rm_ftrace_export(&ftrace_exports_list, export);
2338
2339 mutex_unlock(&ftrace_export_lock);
2340
2341 return ret;
2342}
2343EXPORT_SYMBOL_GPL(unregister_ftrace_export);
2344
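/*
 * A minimal trace_export client (sketch; the ->write() signature is
 * inferred from trace_process_export() above, which passes the raw
 * entry and its length). A real user might push entries to an STM
 * device or a network sink.
 */
static void example_export_write(const void *buf, unsigned int len)
{
	/* forward the raw trace entry somewhere out of band */
}

static struct trace_export example_export = {
	.write	= example_export_write,
};

static int example_export_attach(void)
{
	return register_ftrace_export(&example_export);
}

static void example_export_detach(void)
{
	unregister_ftrace_export(&example_export);
}
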
Ingo Molnare309b412008-05-12 21:20:51 +02002345void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002346trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04002347 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2348 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002349{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002350 struct trace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002351 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002352 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002353 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002354
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002355 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2356 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002357 if (!event)
2358 return;
2359 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002360 entry->ip = ip;
2361 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002362
Chunyan Zhang478409d2016-11-21 15:57:18 +08002363 if (!call_filter_check_discard(call, entry, buffer, event)) {
2364 if (static_branch_unlikely(&ftrace_exports_enabled))
2365 ftrace_exports(event);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002366 __buffer_unlock_commit(buffer, event);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002367 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002368}
2369
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002370#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002371
2372#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2373struct ftrace_stack {
2374 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2375};
2376
2377static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2378static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2379
Steven Rostedte77405a2009-09-02 14:17:06 -04002380static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05002381 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002382 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02002383{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002384 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002385 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002386 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02002387 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002388 int use_stack;
2389 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02002390
2391 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02002392 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02002393
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002394 /*
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002395 * Add two, for this function and the call to save_stack_trace().
2396 * If regs is set, then these functions will not be in the way.
2397 */
2398 if (!regs)
2399 trace.skip += 2;
2400
2401 /*
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002402 * Since events can happen in NMIs there's no safe way to
2403 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2404 * or NMI comes in, it will just have to use the default
2405 * FTRACE_STACK_ENTRIES.
2406 */
2407 preempt_disable_notrace();
2408
Shan Wei82146522012-11-19 13:21:01 +08002409 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002410 /*
2411 * We don't need any atomic variables, just a barrier.
2412 * If an interrupt comes in, we don't care, because it would
2413 * have exited and put the counter back to what we want.
2414 * We just need a barrier to keep gcc from moving things
2415 * around.
2416 */
2417 barrier();
2418 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05002419 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002420 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2421
2422 if (regs)
2423 save_stack_trace_regs(regs, &trace);
2424 else
2425 save_stack_trace(&trace);
2426
2427 if (trace.nr_entries > size)
2428 size = trace.nr_entries;
2429 } else
2430 /* From now on, use_stack is a boolean */
2431 use_stack = 0;
2432
2433 size *= sizeof(unsigned long);
2434
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002435 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2436 sizeof(*entry) + size, flags, pc);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002437 if (!event)
2438 goto out;
2439 entry = ring_buffer_event_data(event);
2440
2441 memset(&entry->caller, 0, size);
2442
2443 if (use_stack)
2444 memcpy(&entry->caller, trace.entries,
2445 trace.nr_entries * sizeof(unsigned long));
2446 else {
2447 trace.max_entries = FTRACE_STACK_ENTRIES;
2448 trace.entries = entry->caller;
2449 if (regs)
2450 save_stack_trace_regs(regs, &trace);
2451 else
2452 save_stack_trace(&trace);
2453 }
2454
2455 entry->size = trace.nr_entries;
2456
Tom Zanussif306cc82013-10-24 08:34:17 -05002457 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002458 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002459
2460 out:
2461 /* Again, don't let gcc optimize things here */
2462 barrier();
Shan Wei82146522012-11-19 13:21:01 +08002463 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002464 preempt_enable_notrace();
2465
Ingo Molnarf0a920d2008-05-12 21:20:47 +02002466}
2467
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002468static inline void ftrace_trace_stack(struct trace_array *tr,
2469 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002470 unsigned long flags,
2471 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05002472{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002473 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05002474 return;
2475
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002476 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05002477}
2478
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002479void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2480 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04002481{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002482 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04002483}
2484
Steven Rostedt03889382009-12-11 09:48:22 -05002485/**
2486 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002487 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05002488 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002489void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05002490{
2491 unsigned long flags;
2492
2493 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05002494 return;
Steven Rostedt03889382009-12-11 09:48:22 -05002495
2496 local_save_flags(flags);
2497
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002498 /*
2499 * Skip 3 more; that seems to get us to the caller of
2500 * this function.
2501 */
2502 skip += 3;
2503 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2504 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05002505}
2506
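/*
 * Example call site (hypothetical): recording the current back trace
 * in the ring buffer while debugging a driver.
 */
static void example_report_bad_state(void)
{
	pr_warn("example: unexpected state, dumping stack to trace buffer\n");
	trace_dump_stack(0);	/* 0: skip no frames beyond the helpers */
}
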
Steven Rostedt91e86e52010-11-10 12:56:12 +01002507static DEFINE_PER_CPU(int, user_stack_count);
2508
Steven Rostedte77405a2009-09-02 14:17:06 -04002509void
2510ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02002511{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002512 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02002513 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02002514 struct userstack_entry *entry;
2515 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02002516
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002517 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02002518 return;
2519
Steven Rostedtb6345872010-03-12 20:03:30 -05002520 /*
2521 * NMIs can not handle page faults, even with fix ups.
2522 * The save user stack can (and often does) fault.
2523 */
2524 if (unlikely(in_nmi()))
2525 return;
2526
Steven Rostedt91e86e52010-11-10 12:56:12 +01002527 /*
2528 * prevent recursion, since the user stack tracing may
2529 * trigger other kernel events.
2530 */
2531 preempt_disable();
2532 if (__this_cpu_read(user_stack_count))
2533 goto out;
2534
2535 __this_cpu_inc(user_stack_count);
2536
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002537 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2538 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02002539 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08002540 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02002541 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02002542
Steven Rostedt48659d32009-09-11 11:36:23 -04002543 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02002544 memset(&entry->caller, 0, sizeof(entry->caller));
2545
2546 trace.nr_entries = 0;
2547 trace.max_entries = FTRACE_STACK_ENTRIES;
2548 trace.skip = 0;
2549 trace.entries = entry->caller;
2550
2551 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05002552 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002553 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002554
Li Zefan1dbd1952010-12-09 15:47:56 +08002555 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01002556 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002557 out:
2558 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02002559}
2560
Hannes Eder4fd27352009-02-10 19:44:12 +01002561#ifdef UNUSED
2562static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02002563{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002564 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02002565}
Hannes Eder4fd27352009-02-10 19:44:12 +01002566#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02002567
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002568#endif /* CONFIG_STACKTRACE */
2569
Steven Rostedt07d777f2011-09-22 14:01:55 -04002570/* created for use with alloc_percpu */
2571struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002572 int nesting;
2573 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04002574};
2575
2576static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002577
2578/*
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002579 * This allows for lockless recording. If we're nested too deeply, then
2580 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04002581 */
2582static char *get_trace_buf(void)
2583{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002584 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002585
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002586 if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002587 return NULL;
2588
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002589 return &buffer->buffer[buffer->nesting++][0];
2590}
2591
2592static void put_trace_buf(void)
2593{
2594 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002595}
2596
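/*
 * Sketch of the usage pattern (mirrors trace_vbprintk() below): the
 * nesting counter makes the buffer safe against recursion from
 * interrupts and NMIs, up to four levels deep. On failure nothing
 * was reserved, so put_trace_buf() must not be called.
 */
static void example_use_trace_buf(void)
{
	char *tbuffer;

	preempt_disable_notrace();	/* stay on this CPU's buffer */
	tbuffer = get_trace_buf();
	if (!tbuffer)
		goto out;		/* nested too deeply; drop the data */

	/* ... format at most TRACE_BUF_SIZE bytes into tbuffer ... */

	put_trace_buf();
out:
	preempt_enable_notrace();
}
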
2597static int alloc_percpu_trace_buffer(void)
2598{
2599 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002600
2601 buffers = alloc_percpu(struct trace_buffer_struct);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002602 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2603 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002604
2605 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002606 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002607}
2608
Steven Rostedt81698832012-10-11 10:15:05 -04002609static int buffers_allocated;
2610
Steven Rostedt07d777f2011-09-22 14:01:55 -04002611void trace_printk_init_buffers(void)
2612{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002613 if (buffers_allocated)
2614 return;
2615
2616 if (alloc_percpu_trace_buffer())
2617 return;
2618
Steven Rostedt2184db42014-05-28 13:14:40 -04002619 /* trace_printk() is for debug use only. Don't use it in production. */
2620
Joe Perchesa395d6a2016-03-22 14:28:09 -07002621 pr_warn("\n");
2622 pr_warn("**********************************************************\n");
2623 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2624 pr_warn("** **\n");
2625 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2626 pr_warn("** **\n");
2627 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2628 pr_warn("** unsafe for production use. **\n");
2629 pr_warn("** **\n");
2630 pr_warn("** If you see this message and you are not debugging **\n");
2631 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2632 pr_warn("** **\n");
2633 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2634 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002635
Steven Rostedtb382ede62012-10-10 21:44:34 -04002636 /* Expand the buffers to set size */
2637 tracing_update_buffers();
2638
Steven Rostedt07d777f2011-09-22 14:01:55 -04002639 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002640
2641 /*
2642 * trace_printk_init_buffers() can be called by modules.
2643 * If that happens, then we need to start cmdline recording
2644 * directly here. If the global_trace.buffer is already
2645 * allocated here, then this was called by module code.
2646 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002647 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002648 tracing_start_cmdline_record();
2649}
2650
2651void trace_printk_start_comm(void)
2652{
2653 /* Start tracing comms if trace printk is set */
2654 if (!buffers_allocated)
2655 return;
2656 tracing_start_cmdline_record();
2657}
2658
2659static void trace_printk_start_stop_comm(int enabled)
2660{
2661 if (!buffers_allocated)
2662 return;
2663
2664 if (enabled)
2665 tracing_start_cmdline_record();
2666 else
2667 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002668}
2669
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002670/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002671 * trace_vbprintk - write binary msg to tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002672 *
2673 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					    flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	}

out:
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
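
/*
 * Illustrative sketch (not part of this file): trace_vbprintk() is the
 * va_list backend that the binary trace_printk() path ends up in. A
 * hypothetical varargs wrapper would look like this, with the caller
 * passing its own instruction pointer via _THIS_IP_:
 *
 *	int my_bprintk(const char *fmt, ...)
 *	{
 *		va_list ap;
 *		int ret;
 *
 *		va_start(ap, fmt);
 *		ret = trace_vbprintk(_THIS_IP_, fmt, ap);
 *		va_end(ap);
 *		return ret;
 *	}
 *
 * Note that @fmt must point to persistent storage: only the pointer is
 * stored in the bprint_entry (entry->fmt = fmt above), and the format
 * is parsed again when the buffer is read.
 */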

static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len + 1);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
	}

out:
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}

int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
}

int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}
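
/*
 * Usage sketch (hypothetical, for illustration): a tracer holding a
 * pointer to an instance's trace_array can write into that instance's
 * buffer rather than the global one:
 *
 *	trace_array_printk(tr, _THIS_IP_, "hit count: %d\n", count);
 *
 * Note that the global TRACE_ITER_PRINTK flag still gates the write,
 * since the check above tests global_trace.trace_flags rather than
 * tr->trace_flags.
 */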

int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);

static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_read(buf_iter, NULL);
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
					 lost_events);

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}
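
/*
 * A note on the two paths above: an iterator (buf_iter) exists for
 * non-consuming reads such as the "trace" file, where entries stay in
 * the buffer. Without one, ring_buffer_peek() is used on the live
 * buffer, which is the consuming-read path (e.g. "trace_pipe"); there
 * the entry is later removed by trace_consume() below. In both cases
 * iter->ent_size is updated as a side effect so callers can remember
 * the length of the entry that was peeked at.
 */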

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother iterating over
	 * all CPUs; peek at that one directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}
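
/*
 * Worked example of the merge above: with three per-cpu buffers whose
 * oldest unread entries have timestamps
 *
 *	cpu0: ts = 1042, cpu1: ts = 1037, cpu2: (empty)
 *
 * the loop skips cpu2 and picks cpu1's entry, since 1037 < 1042. The
 * caller therefore sees events in global timestamp order even though
 * each CPU's buffer is only ordered locally.
 */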

/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * With the max latency tracers, it is possible that a reset
	 * never took place on a cpu. This is evident when an entry's
	 * timestamp is before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}
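
/*
 * Example of the accounting above: if a max-latency snapshot started at
 * time_start = 5000 and a cpu's buffer still holds 3 entries stamped
 * 4200, 4600 and 4900, those 3 are read past and recorded in
 * skipped_entries; get_total_entries() below subtracts them so the
 * header only counts entries from the current trace window.
 */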

/*
 * The current tracer is copied to avoid global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * Copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace; the name pointer may
	 * be compared instead of calling strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}

static void
get_total_entries(struct trace_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long count;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(buf->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
			*total += count +
				ring_buffer_overrun_cpu(buf->buffer, cpu);
		*entries += count;
	}
}
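
/*
 * Arithmetic sketch: for a cpu with 800 entries still in the buffer
 * and an overrun of 200 (entries overwritten because the buffer
 * wrapped), this reports entries += 800 and total += 1000. If instead
 * skipped_entries = 50 (see tracing_iter_reset()), both counters get
 * 800 - 50 = 750, since the skipped entries predate the trace window
 * and the overrun count is deliberately ignored in that case.
 */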

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n"
		    "#                 / _-----=> irqs-off        \n"
		    "#                | / _----=> need-resched    \n"
		    "#                || / _---=> hardirq/softirq \n"
		    "#                ||| / _--=> preempt-depth   \n"
		    "#                |||| /     delay            \n"
		    "#  cmd     pid   ||||| time  |   caller      \n"
		    "#     \\   /      |||||  \\    |   /         \n");
}

static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}

static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
		    "#              | |       |          |         |\n");
}

static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n"
		    "#                             / _----=> need-resched\n"
		    "#                            | / _---=> hardirq/softirq\n"
		    "#                            || / _--=> preempt-depth\n"
		    "#                            ||| /     delay\n"
		    "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
		    "#              | |       |   ||||       |         |\n");
}

void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	if (iter->started)
		cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD(s, entry->pid);
		SEQ_PUT_FIELD(s, iter->cpu);
		SEQ_PUT_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}

int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;
	enum print_line_t ret;

	if (iter->lost_events) {
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
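
/*
 * To summarize the dispatch order above: the lost-event annotation is
 * emitted first, then the tracer's own print_line() hook gets a chance,
 * then the printk-msg-only short forms (bputs, bprint, print), then the
 * user-selected bin/hex/raw formats, and finally the default
 * human-readable formatter. The first handler that claims the line
 * wins.
 */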

void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer, m);
			else
				print_func_help_header(iter->trace_buffer, m);
		}
	}
}

static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
		    "#          MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "#                      (Doesn't have to be '2'; works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "#                      Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "#                      (Doesn't have to be '2'; works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

/*
 * Should be used after trace_array_get(); trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}
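
/*
 * Encoding sketch: per-cpu files are expected to store (cpu + 1) in
 * i_cdev when they are created, so a NULL i_cdev (zero) naturally
 * decodes to RING_BUFFER_ALL_CPUS here. For example, i_cdev == (void *)5
 * means cpu 4, while the top-level "trace" file, which never sets
 * i_cdev, means "all CPUs".
 */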

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
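
/*
 * These callbacks are driven by the core seq_read() loop, roughly:
 * s_start(m, &pos), then s_show()/s_next() repeated until the user
 * buffer fills or the trace runs out, then s_stop(). s_start() may also
 * be re-entered at a non-zero pos when a previous s_show() overflowed
 * the seq_file buffer, which is what the iter->leftover handling above
 * is for.
 */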

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}
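
/*
 * A note on the prepare/sync/start sequence above: ring buffer read
 * iterators are created in two stages so that, when iterating all CPUs,
 * the expensive synchronization in ring_buffer_read_prepare_sync() is
 * paid once for the whole set rather than once per cpu, and every
 * per-cpu iterator then starts from a consistent point in time.
 */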

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return tracing_disabled;
}
3693
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003694/*
3695 * Open and update trace_array ref count.
3696 * Must have the current trace_array passed to it.
3697 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003698static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003699{
3700 struct trace_array *tr = inode->i_private;
3701
3702 if (tracing_disabled)
3703 return -ENODEV;
3704
3705 if (trace_array_get(tr) < 0)
3706 return -ENODEV;
3707
3708 filp->private_data = inode->i_private;
3709
3710 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003711}
3712
Hannes Eder4fd27352009-02-10 19:44:12 +01003713static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003714{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003715 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003716 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003717 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003718 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003719
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003720 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003721 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003722 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003723 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003724
Oleg Nesterov6484c712013-07-23 17:26:10 +02003725 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003726 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003727 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003728
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003729 for_each_tracing_cpu(cpu) {
3730 if (iter->buffer_iter[cpu])
3731 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3732 }
3733
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003734 if (iter->trace && iter->trace->close)
3735 iter->trace->close(iter);
3736
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003737 if (!iter->snapshot)
3738 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003739 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003740
3741 __trace_array_put(tr);
3742
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003743 mutex_unlock(&trace_types_lock);
3744
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003745 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003746 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003747 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003748 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003749 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003750
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003751 return 0;
3752}
3753
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003754static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3755{
3756 struct trace_array *tr = inode->i_private;
3757
3758 trace_array_put(tr);
3759 return 0;
3760}
3761
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003762static int tracing_single_release_tr(struct inode *inode, struct file *file)
3763{
3764 struct trace_array *tr = inode->i_private;
3765
3766 trace_array_put(tr);
3767
3768 return single_release(inode, file);
3769}
3770
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003771static int tracing_open(struct inode *inode, struct file *file)
3772{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003773 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003774 struct trace_iterator *iter;
3775 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003776
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003777 if (trace_array_get(tr) < 0)
3778 return -ENODEV;
3779
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003780 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003781 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3782 int cpu = tracing_get_cpu(inode);
3783
3784 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003785 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003786 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003787 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003788 }
3789
3790 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003791 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003792 if (IS_ERR(iter))
3793 ret = PTR_ERR(iter);
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003794 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003795 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3796 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003797
3798 if (ret < 0)
3799 trace_array_put(tr);
3800
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003801 return ret;
3802}
3803
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003804/*
3805 * Some tracers are not suitable for instance buffers.
3806 * A tracer is always available for the global array (toplevel),
3807 * and for an instance only if it explicitly allows it.
3808 */
3809static bool
3810trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3811{
3812 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3813}
3814
3815/* Find the next tracer that this trace array may use */
3816static struct tracer *
3817get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3818{
3819 while (t && !trace_ok_for_array(t, tr))
3820 t = t->next;
3821
3822 return t;
3823}
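/*
 * Illustrative sketch of how a tracer opts in to instances; the tracer
 * name here is hypothetical, but allow_instances is the struct tracer
 * field tested by trace_ok_for_array() above:
 *
 *	static struct tracer my_tracer = {
 *		.name		 = "my_tracer",
 *		.allow_instances = true,
 *	};
 *
 * Without allow_instances set, get_tracer_for_array() skips the tracer
 * for everything but the toplevel (global) trace array.
 */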
3824
Ingo Molnare309b412008-05-12 21:20:51 +02003825static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003826t_next(struct seq_file *m, void *v, loff_t *pos)
3827{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003828 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003829 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003830
3831 (*pos)++;
3832
3833 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003834 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003835
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003836 return t;
3837}
3838
3839static void *t_start(struct seq_file *m, loff_t *pos)
3840{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003841 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003842 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003843 loff_t l = 0;
3844
3845 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003846
3847 t = get_tracer_for_array(tr, trace_types);
3848 for (; t && l < *pos; t = t_next(m, t, &l))
3849 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003850
3851 return t;
3852}
3853
3854static void t_stop(struct seq_file *m, void *p)
3855{
3856 mutex_unlock(&trace_types_lock);
3857}
3858
3859static int t_show(struct seq_file *m, void *v)
3860{
3861 struct tracer *t = v;
3862
3863 if (!t)
3864 return 0;
3865
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003866 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003867 if (t->next)
3868 seq_putc(m, ' ');
3869 else
3870 seq_putc(m, '\n');
3871
3872 return 0;
3873}
3874
James Morris88e9d342009-09-22 16:43:43 -07003875static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003876 .start = t_start,
3877 .next = t_next,
3878 .stop = t_stop,
3879 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003880};
3881
3882static int show_traces_open(struct inode *inode, struct file *file)
3883{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003884 struct trace_array *tr = inode->i_private;
3885 struct seq_file *m;
3886 int ret;
3887
Steven Rostedt60a11772008-05-12 21:20:44 +02003888 if (tracing_disabled)
3889 return -ENODEV;
3890
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003891 ret = seq_open(file, &show_traces_seq_ops);
3892 if (ret)
3893 return ret;
3894
3895 m = file->private_data;
3896 m->private = tr;
3897
3898 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003899}
3900
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003901static ssize_t
3902tracing_write_stub(struct file *filp, const char __user *ubuf,
3903 size_t count, loff_t *ppos)
3904{
3905 return count;
3906}
3907
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003908loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003909{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003910 int ret;
3911
Slava Pestov364829b2010-11-24 15:13:16 -08003912 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003913 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003914 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003915 file->f_pos = ret = 0;
3916
3917 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003918}
3919
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003920static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003921 .open = tracing_open,
3922 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003923 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003924 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003925 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003926};
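/*
 * These fops back the "trace" file. Typical interactions, assuming the
 * usual tracefs mount at /sys/kernel/debug/tracing:
 *
 *	cat trace	# read a static snapshot via seq_file
 *	echo > trace	# opening with O_TRUNC clears the buffer; the
 *			# write itself lands in tracing_write_stub()
 */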
3927
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003928static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003929 .open = show_traces_open,
3930 .read = seq_read,
3931 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003932 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003933};
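/*
 * show_traces_open() backs "available_tracers"; a read such as
 *
 *	cat /sys/kernel/debug/tracing/available_tracers
 *
 * walks trace_types through t_start()/t_next() above and prints the
 * space-separated tracer names (path assumes the usual mount point).
 */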
3934
Ingo Molnar36dfe922008-05-12 21:20:52 +02003935/*
Ingo Molnar36dfe922008-05-12 21:20:52 +02003936 * The tracer itself will not take this lock, but still we want
3937 * to provide a consistent cpumask to user-space:
3938 */
3939static DEFINE_MUTEX(tracing_cpumask_update_lock);
3940
3941/*
3942 * Temporary storage for the character representation of the
3943 * CPU bitmask (and one more byte for the newline):
3944 */
3945static char mask_str[NR_CPUS + 1];
3946
Ingo Molnarc7078de2008-05-12 21:20:52 +02003947static ssize_t
3948tracing_cpumask_read(struct file *filp, char __user *ubuf,
3949 size_t count, loff_t *ppos)
3950{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003951 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003952 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003953
3954 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003955
Tejun Heo1a402432015-02-13 14:37:39 -08003956	len = snprintf(mask_str, sizeof(mask_str), "%*pb\n",
3957		       cpumask_pr_args(tr->tracing_cpumask));
3958 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003959 count = -EINVAL;
3960 goto out_err;
3961 }
Ingo Molnar36dfe922008-05-12 21:20:52 +02003962	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
3963
3964out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003965 mutex_unlock(&tracing_cpumask_update_lock);
3966
3967 return count;
3968}
3969
3970static ssize_t
3971tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3972 size_t count, loff_t *ppos)
3973{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003974 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303975 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003976 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303977
3978 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3979 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003980
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303981 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003982 if (err)
3983 goto err_unlock;
3984
Li Zefan215368e2009-06-15 10:56:42 +08003985 mutex_lock(&tracing_cpumask_update_lock);
3986
Steven Rostedta5e25882008-12-02 15:34:05 -05003987 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003988 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02003989 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003990 /*
3991 * Increase/decrease the disabled counter if we are
3992 * about to flip a bit in the cpumask:
3993 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003994 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303995 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003996 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3997 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003998 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003999 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304000 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004001 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4002 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004003 }
4004 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004005 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05004006 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02004007
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004008 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004009
Ingo Molnarc7078de2008-05-12 21:20:52 +02004010 mutex_unlock(&tracing_cpumask_update_lock);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304011 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004012
Ingo Molnarc7078de2008-05-12 21:20:52 +02004013 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004014
4015err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08004016 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004017
4018 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004019}
4020
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004021static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004022 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004023 .read = tracing_cpumask_read,
4024 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004025 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004026 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004027};
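/*
 * Example interaction with "tracing_cpumask", assuming the usual mount
 * point; the mask uses the standard hex cpumask format parsed by
 * cpumask_parse_user():
 *
 *	echo 5 > /sys/kernel/debug/tracing/tracing_cpumask	# CPUs 0,2
 *
 * The write path above then pairs ring_buffer_record_disable_cpu()/
 * _enable_cpu() with the per-cpu disabled counter for each CPU whose
 * bit flipped.
 */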
4028
Li Zefanfdb372e2009-12-08 11:15:59 +08004029static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004030{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004031 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004032 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004033 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004034 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004035
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004036 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004037 tracer_flags = tr->current_trace->flags->val;
4038 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004039
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004040 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004041 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08004042 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004043 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004044 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004045 }
4046
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004047 for (i = 0; trace_opts[i].name; i++) {
4048 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08004049 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004050 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004051 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004052 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004053 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004054
Li Zefanfdb372e2009-12-08 11:15:59 +08004055 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004056}
4057
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004058static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08004059 struct tracer_flags *tracer_flags,
4060 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004061{
Chunyu Hud39cdd22016-03-08 21:37:01 +08004062 struct tracer *trace = tracer_flags->trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004063 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004064
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004065 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004066 if (ret)
4067 return ret;
4068
4069 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08004070 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004071 else
Zhaolei77708412009-08-07 18:53:21 +08004072 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004073 return 0;
4074}
4075
Li Zefan8d18eaa2009-12-08 11:17:06 +08004076/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004077static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08004078{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004079 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004080 struct tracer_flags *tracer_flags = trace->flags;
4081 struct tracer_opt *opts = NULL;
4082 int i;
4083
4084 for (i = 0; tracer_flags->opts[i].name; i++) {
4085 opts = &tracer_flags->opts[i];
4086
4087 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004088 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08004089 }
4090
4091 return -EINVAL;
4092}
4093
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004094/* Some tracers require overwrite to stay enabled */
4095int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4096{
4097 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4098 return -1;
4099
4100 return 0;
4101}
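/*
 * Sketch of how a latency tracer typically hooks this in; the callback
 * name is hypothetical but mirrors the wrappers used by the
 * irqsoff/wakeup tracers:
 *
 *	static int my_flag_changed(struct trace_array *tr, u32 mask, int set)
 *	{
 *		return trace_keep_overwrite(tr->current_trace, mask, set);
 *	}
 *
 * With that wired to .flag_changed, set_tracer_flag() below rejects
 * "echo 0 > options/overwrite" with -EINVAL while the tracer runs.
 */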
4102
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004103int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004104{
4105 /* do nothing if flag is already set */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004106 if (!!(tr->trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004107 return 0;
4108
4109 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004110 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05004111 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004112 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004113
4114 if (enabled)
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004115 tr->trace_flags |= mask;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004116 else
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004117 tr->trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08004118
4119 if (mask == TRACE_ITER_RECORD_CMD)
4120 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08004121
Steven Rostedtc37775d2016-04-13 16:59:18 -04004122 if (mask == TRACE_ITER_EVENT_FORK)
4123 trace_event_follow_fork(tr, enabled);
4124
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004125 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004126 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004127#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004128 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004129#endif
4130 }
Steven Rostedt81698832012-10-11 10:15:05 -04004131
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004132 if (mask == TRACE_ITER_PRINTK) {
Steven Rostedt81698832012-10-11 10:15:05 -04004133 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004134 trace_printk_control(enabled);
4135 }
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004136
4137 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004138}
4139
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004140static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004141{
Li Zefan8d18eaa2009-12-08 11:17:06 +08004142 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004143 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004144 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004145 int i;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004146 size_t orig_len = strlen(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004147
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004148 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004149
Li Zefan8d18eaa2009-12-08 11:17:06 +08004150 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004151 neg = 1;
4152 cmp += 2;
4153 }
4154
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004155 mutex_lock(&trace_types_lock);
4156
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004157 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08004158 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004159 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004160 break;
4161 }
4162 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004163
4164 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004165 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004166 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004167
4168 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004169
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004170 /*
4171 * If the first trailing whitespace is replaced with '\0' by strstrip,
4172 * turn it back into a space.
4173 */
4174 if (orig_len > strlen(option))
4175 option[strlen(option)] = ' ';
4176
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004177 return ret;
4178}
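/*
 * What trace_set_options() parses, as seen from user space (mount point
 * assumed, option names for illustration):
 *
 *	echo sym-offset > trace_options		# set a core option
 *	echo nosym-offset > trace_options	# "no" prefix clears it
 *
 * Anything not found in trace_options[] falls through to the current
 * tracer's own flags via set_tracer_option() above.
 */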
4179
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004180static void __init apply_trace_boot_options(void)
4181{
4182 char *buf = trace_boot_options_buf;
4183 char *option;
4184
4185 while (true) {
4186 option = strsep(&buf, ",");
4187
4188 if (!option)
4189 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004190
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05004191 if (*option)
4192 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004193
4194 /* Put back the comma to allow this to be called again */
4195 if (buf)
4196 *(buf - 1) = ',';
4197 }
4198}
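/*
 * A minimal sketch of the command line this consumes, assuming the
 * trace_options= boot parameter fills trace_boot_options_buf (option
 * names for illustration):
 *
 *	trace_options=stacktrace,noirq-info
 *
 * strsep() splits on ',' and each token is handed to
 * trace_set_options() for the global trace array.
 */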
4199
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004200static ssize_t
4201tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4202 size_t cnt, loff_t *ppos)
4203{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004204 struct seq_file *m = filp->private_data;
4205 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004206 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004207 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004208
4209 if (cnt >= sizeof(buf))
4210 return -EINVAL;
4211
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08004212 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004213 return -EFAULT;
4214
Steven Rostedta8dd2172013-01-09 20:54:17 -05004215 buf[cnt] = 0;
4216
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004217 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004218 if (ret < 0)
4219 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004220
Jiri Olsacf8517c2009-10-23 19:36:16 -04004221 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004222
4223 return cnt;
4224}
4225
Li Zefanfdb372e2009-12-08 11:15:59 +08004226static int tracing_trace_options_open(struct inode *inode, struct file *file)
4227{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004228 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004229 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004230
Li Zefanfdb372e2009-12-08 11:15:59 +08004231 if (tracing_disabled)
4232 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004233
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004234 if (trace_array_get(tr) < 0)
4235 return -ENODEV;
4236
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004237 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4238 if (ret < 0)
4239 trace_array_put(tr);
4240
4241 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08004242}
4243
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004244static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08004245 .open = tracing_trace_options_open,
4246 .read = seq_read,
4247 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004248 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05004249 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004250};
4251
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004252static const char readme_msg[] =
4253 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004254 "# echo 0 > tracing_on : quick way to disable tracing\n"
4255 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4256 " Important files:\n"
4257 " trace\t\t\t- The static contents of the buffer\n"
4258 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4259 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4260 " current_tracer\t- function and latency tracers\n"
4261 " available_tracers\t- list of configured tracers for current_tracer\n"
4262 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4263 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4264 " trace_clock\t\t-change the clock used to order events\n"
4265 " local: Per cpu clock but may not be synced across CPUs\n"
4266 " global: Synced across CPUs but slows tracing down.\n"
4267 " counter: Not a clock, but just an increment\n"
4268 " uptime: Jiffy counter from time of boot\n"
4269 " perf: Same clock that perf events use\n"
4270#ifdef CONFIG_X86_64
4271 " x86-tsc: TSC cycle counter\n"
4272#endif
4273 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
Steven Rostedtfa32e852016-07-06 15:25:08 -04004274 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004275 " tracing_cpumask\t- Limit which CPUs to trace\n"
4276 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4277 "\t\t\t Remove sub-buffer with rmdir\n"
4278 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004279 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4280 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004281 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004282#ifdef CONFIG_DYNAMIC_FTRACE
4283 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004284 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4285 "\t\t\t functions\n"
Masami Hiramatsu60f1d5e2016-10-05 20:58:15 +09004286 "\t accepts: func_full_name or glob-matching-pattern\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004287 "\t modules: Can select a group via module\n"
4288 "\t Format: :mod:<module-name>\n"
4289 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4290 "\t triggers: a command to perform when function is hit\n"
4291 "\t Format: <function>:<trigger>[:count]\n"
4292 "\t trigger: traceon, traceoff\n"
4293 "\t\t enable_event:<system>:<event>\n"
4294 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004295#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004296 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004297#endif
4298#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004299 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004300#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04004301 "\t\t dump\n"
4302 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004303 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4304 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4305 "\t The first one will disable tracing every time do_fault is hit\n"
4306 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4307 "\t The first time do trap is hit and it disables tracing, the\n"
4308 "\t counter will decrement to 2. If tracing is already disabled,\n"
4309 "\t the counter will not decrement. It only decrements when the\n"
4310 "\t trigger did work\n"
4311 "\t To remove trigger without count:\n"
4312 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4313 "\t To remove trigger with a count:\n"
4314 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004315 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004316 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4317 "\t modules: Can select a group via module command :mod:\n"
4318 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004319#endif /* CONFIG_DYNAMIC_FTRACE */
4320#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004321 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4322 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004323#endif
4324#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4325 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09004326 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004327 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4328#endif
4329#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004330 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4331 "\t\t\t snapshot buffer. Read the contents for more\n"
4332 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004333#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004334#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004335 " stack_trace\t\t- Shows the max stack trace when active\n"
4336 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004337 "\t\t\t Write into this file to reset the max size (trigger a\n"
4338 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004339#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004340 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4341 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004342#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004343#endif /* CONFIG_STACK_TRACER */
Masami Hiramatsu86425622016-08-18 17:58:15 +09004344#ifdef CONFIG_KPROBE_EVENT
4345 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4346 "\t\t\t Write into this file to define/undefine new trace events.\n"
4347#endif
4348#ifdef CONFIG_UPROBE_EVENT
4349 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4350 "\t\t\t Write into this file to define/undefine new trace events.\n"
4351#endif
4352#if defined(CONFIG_KPROBE_EVENT) || defined(CONFIG_UPROBE_EVENT)
4353 "\t accepts: event-definitions (one definition per line)\n"
4354 "\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
4355 "\t -:[<group>/]<event>\n"
4356#ifdef CONFIG_KPROBE_EVENT
4357 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4358#endif
4359#ifdef CONFIG_UPROBE_EVENT
4360 "\t place: <path>:<offset>\n"
4361#endif
4362 "\t args: <name>=fetcharg[:type]\n"
4363 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4364 "\t $stack<index>, $stack, $retval, $comm\n"
4365 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4366 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4367#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06004368 " events/\t\t- Directory containing all trace event subsystems:\n"
4369 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4370 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004371 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4372 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004373 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004374 " events/<system>/<event>/\t- Directory containing control files for\n"
4375 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004376 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4377 " filter\t\t- If set, only events passing filter are traced\n"
4378 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004379 "\t Format: <trigger>[:count][if <filter>]\n"
4380 "\t trigger: traceon, traceoff\n"
4381 "\t enable_event:<system>:<event>\n"
4382 "\t disable_event:<system>:<event>\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06004383#ifdef CONFIG_HIST_TRIGGERS
4384 "\t enable_hist:<system>:<event>\n"
4385 "\t disable_hist:<system>:<event>\n"
4386#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06004387#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004388 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004389#endif
4390#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004391 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004392#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004393#ifdef CONFIG_HIST_TRIGGERS
4394 "\t\t hist (see below)\n"
4395#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004396 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4397 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4398 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4399 "\t events/block/block_unplug/trigger\n"
4400 "\t The first disables tracing every time block_unplug is hit.\n"
4401 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4402 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4403 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4404 "\t Like function triggers, the counter is only decremented if it\n"
4405 "\t enabled or disabled tracing.\n"
4406 "\t To remove a trigger without a count:\n"
4407 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4408 "\t To remove a trigger with a count:\n"
4409 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4410 "\t Filters can be ignored when removing a trigger.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004411#ifdef CONFIG_HIST_TRIGGERS
4412 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
Tom Zanussi76a3b0c2016-03-03 12:54:44 -06004413 "\t Format: hist:keys=<field1[,field2,...]>\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004414 "\t [:values=<field1[,field2,...]>]\n"
Tom Zanussie62347d2016-03-03 12:54:45 -06004415 "\t [:sort=<field1[,field2,...]>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004416 "\t [:size=#entries]\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06004417 "\t [:pause][:continue][:clear]\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004418 "\t [:name=histname1]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004419 "\t [if <filter>]\n\n"
4420 "\t When a matching event is hit, an entry is added to a hash\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004421 "\t table using the key(s) and value(s) named, and the value of a\n"
4422 "\t sum called 'hitcount' is incremented. Keys and values\n"
4423 "\t correspond to fields in the event's format description. Keys\n"
Tom Zanussi69a02002016-03-03 12:54:52 -06004424 "\t can be any field, or the special string 'stacktrace'.\n"
4425 "\t Compound keys consisting of up to two fields can be specified\n"
4426 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4427 "\t fields. Sort keys consisting of up to two fields can be\n"
4428 "\t specified using the 'sort' keyword. The sort direction can\n"
4429 "\t be modified by appending '.descending' or '.ascending' to a\n"
4430 "\t sort field. The 'size' parameter can be used to specify more\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004431 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4432 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4433 "\t its histogram data will be shared with other triggers of the\n"
4434 "\t same name, and trigger hits will update this common data.\n\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004435 "\t Reading the 'hist' file for the event will dump the hash\n"
Tom Zanussi52a7f162016-03-03 12:54:57 -06004436 "\t table in its entirety to stdout. If there are multiple hist\n"
4437 "\t triggers attached to an event, there will be a table for each\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004438 "\t trigger in the output. The table displayed for a named\n"
4439 "\t trigger will be the same as any other instance having the\n"
4440 "\t same name. The default format used to display a given field\n"
4441 "\t can be modified by appending any of the following modifiers\n"
4442 "\t to the field name, as applicable:\n\n"
Tom Zanussic6afad42016-03-03 12:54:49 -06004443 "\t .hex display a number as a hex value\n"
4444 "\t .sym display an address as a symbol\n"
Tom Zanussi6b4827a2016-03-03 12:54:50 -06004445 "\t .sym-offset display an address as a symbol and offset\n"
Tom Zanussi31696192016-03-03 12:54:51 -06004446 "\t .execname display a common_pid as a program name\n"
4447 "\t .syscall display a syscall id as a syscall name\n\n"
Namhyung Kim4b94f5b2016-03-03 12:55:02 -06004448 "\t .log2 display log2 value rather than raw number\n\n"
Tom Zanussi83e99912016-03-03 12:54:46 -06004449 "\t The 'pause' parameter can be used to pause an existing hist\n"
4450 "\t trigger or to start a hist trigger but not log any events\n"
4451 "\t until told to do so. 'continue' can be used to start or\n"
4452 "\t restart a paused hist trigger.\n\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06004453 "\t The 'clear' parameter will clear the contents of a running\n"
4454 "\t hist trigger and leave its current paused/active state\n"
4455 "\t unchanged.\n\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06004456 "\t The enable_hist and disable_hist triggers can be used to\n"
4457 "\t have one event conditionally start and stop another event's\n"
4458 "\t already-attached hist trigger. The syntax is analagous to\n"
4459 "\t the enable_event and disable_event triggers.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004460#endif
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004461;
4462
4463static ssize_t
4464tracing_readme_read(struct file *filp, char __user *ubuf,
4465 size_t cnt, loff_t *ppos)
4466{
4467 return simple_read_from_buffer(ubuf, cnt, ppos,
4468 readme_msg, strlen(readme_msg));
4469}
4470
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004471static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02004472 .open = tracing_open_generic,
4473 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004474 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004475};
4476
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004477static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04004478{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004479 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004480
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004481 if (*pos || m->count)
4482 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004483
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004484 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004485
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004486 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4487 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004488 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04004489 continue;
4490
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004491 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004492 }
4493
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004494 return NULL;
4495}
Avadh Patel69abe6a2009-04-10 16:04:48 -04004496
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004497static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4498{
4499 void *v;
4500 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004501
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004502 preempt_disable();
4503 arch_spin_lock(&trace_cmdline_lock);
4504
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004505 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004506 while (l <= *pos) {
4507 v = saved_cmdlines_next(m, v, &l);
4508 if (!v)
4509 return NULL;
4510 }
4511
4512 return v;
4513}
4514
4515static void saved_cmdlines_stop(struct seq_file *m, void *v)
4516{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004517 arch_spin_unlock(&trace_cmdline_lock);
4518 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004519}
4520
4521static int saved_cmdlines_show(struct seq_file *m, void *v)
4522{
4523 char buf[TASK_COMM_LEN];
4524 unsigned int *pid = v;
4525
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004526 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004527 seq_printf(m, "%d %s\n", *pid, buf);
4528 return 0;
4529}
4530
4531static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4532 .start = saved_cmdlines_start,
4533 .next = saved_cmdlines_next,
4534 .stop = saved_cmdlines_stop,
4535 .show = saved_cmdlines_show,
4536};
4537
4538static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4539{
4540 if (tracing_disabled)
4541 return -ENODEV;
4542
4543 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04004544}
4545
4546static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004547 .open = tracing_saved_cmdlines_open,
4548 .read = seq_read,
4549 .llseek = seq_lseek,
4550 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04004551};
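/*
 * Reading "saved_cmdlines" (usual mount point assumed) dumps the cached
 * pid -> comm mappings, one "<pid> <comm>" pair per line, e.g.:
 *
 *	cat /sys/kernel/debug/tracing/saved_cmdlines
 *	1234 bash
 *	...
 */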
4552
4553static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004554tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4555 size_t cnt, loff_t *ppos)
4556{
4557 char buf[64];
4558 int r;
4559
4560 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09004561 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004562 arch_spin_unlock(&trace_cmdline_lock);
4563
4564 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4565}
4566
4567static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4568{
4569 kfree(s->saved_cmdlines);
4570 kfree(s->map_cmdline_to_pid);
4571 kfree(s);
4572}
4573
4574static int tracing_resize_saved_cmdlines(unsigned int val)
4575{
4576 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4577
Namhyung Kima6af8fb2014-06-10 16:11:35 +09004578 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004579 if (!s)
4580 return -ENOMEM;
4581
4582 if (allocate_cmdlines_buffer(val, s) < 0) {
4583 kfree(s);
4584 return -ENOMEM;
4585 }
4586
4587 arch_spin_lock(&trace_cmdline_lock);
4588 savedcmd_temp = savedcmd;
4589 savedcmd = s;
4590 arch_spin_unlock(&trace_cmdline_lock);
4591 free_saved_cmdlines_buffer(savedcmd_temp);
4592
4593 return 0;
4594}
4595
4596static ssize_t
4597tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4598 size_t cnt, loff_t *ppos)
4599{
4600 unsigned long val;
4601 int ret;
4602
4603 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4604 if (ret)
4605 return ret;
4606
4607	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
4608 if (!val || val > PID_MAX_DEFAULT)
4609 return -EINVAL;
4610
4611 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4612 if (ret < 0)
4613 return ret;
4614
4615 *ppos += cnt;
4616
4617 return cnt;
4618}
4619
4620static const struct file_operations tracing_saved_cmdlines_size_fops = {
4621 .open = tracing_open_generic,
4622 .read = tracing_saved_cmdlines_size_read,
4623 .write = tracing_saved_cmdlines_size_write,
4624};
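/*
 * Example resize of the cmdline cache; the write path above bounds the
 * value to 1..PID_MAX_DEFAULT:
 *
 *	echo 4096 > /sys/kernel/debug/tracing/saved_cmdlines_size
 */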
4625
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004626#ifdef CONFIG_TRACE_ENUM_MAP_FILE
4627static union trace_enum_map_item *
4628update_enum_map(union trace_enum_map_item *ptr)
4629{
4630 if (!ptr->map.enum_string) {
4631 if (ptr->tail.next) {
4632 ptr = ptr->tail.next;
4633 /* Set ptr to the next real item (skip head) */
4634 ptr++;
4635 } else
4636 return NULL;
4637 }
4638 return ptr;
4639}
4640
4641static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4642{
4643 union trace_enum_map_item *ptr = v;
4644
4645 /*
4646 * Paranoid! If ptr points to end, we don't want to increment past it.
4647 * This really should never happen.
4648 */
4649 ptr = update_enum_map(ptr);
4650 if (WARN_ON_ONCE(!ptr))
4651 return NULL;
4652
4653 ptr++;
4654
4655 (*pos)++;
4656
4657 ptr = update_enum_map(ptr);
4658
4659 return ptr;
4660}
4661
4662static void *enum_map_start(struct seq_file *m, loff_t *pos)
4663{
4664 union trace_enum_map_item *v;
4665 loff_t l = 0;
4666
4667 mutex_lock(&trace_enum_mutex);
4668
4669 v = trace_enum_maps;
4670 if (v)
4671 v++;
4672
4673 while (v && l < *pos) {
4674 v = enum_map_next(m, v, &l);
4675 }
4676
4677 return v;
4678}
4679
4680static void enum_map_stop(struct seq_file *m, void *v)
4681{
4682 mutex_unlock(&trace_enum_mutex);
4683}
4684
4685static int enum_map_show(struct seq_file *m, void *v)
4686{
4687 union trace_enum_map_item *ptr = v;
4688
4689 seq_printf(m, "%s %ld (%s)\n",
4690 ptr->map.enum_string, ptr->map.enum_value,
4691 ptr->map.system);
4692
4693 return 0;
4694}
4695
4696static const struct seq_operations tracing_enum_map_seq_ops = {
4697 .start = enum_map_start,
4698 .next = enum_map_next,
4699 .stop = enum_map_stop,
4700 .show = enum_map_show,
4701};
4702
4703static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4704{
4705 if (tracing_disabled)
4706 return -ENODEV;
4707
4708 return seq_open(filp, &tracing_enum_map_seq_ops);
4709}
4710
4711static const struct file_operations tracing_enum_map_fops = {
4712 .open = tracing_enum_map_open,
4713 .read = seq_read,
4714 .llseek = seq_lseek,
4715 .release = seq_release,
4716};
4717
4718static inline union trace_enum_map_item *
4719trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4720{
4721 /* Return tail of array given the head */
4722 return ptr + ptr->head.length + 1;
4723}
4724
4725static void
4726trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4727 int len)
4728{
4729 struct trace_enum_map **stop;
4730 struct trace_enum_map **map;
4731 union trace_enum_map_item *map_array;
4732 union trace_enum_map_item *ptr;
4733
4734 stop = start + len;
4735
4736 /*
4737 * The trace_enum_maps contains the map plus a head and tail item,
4738 * where the head holds the module and length of array, and the
4739 * tail holds a pointer to the next list.
4740 */
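	/*
	 * Rough picture of one chunk, with len == 3 purely for
	 * illustration:
	 *
	 *	[ head: mod, length=3 ][ map ][ map ][ map ][ tail: next ]
	 *
	 * trace_enum_jmp_to_tail() steps from a head to its tail entry,
	 * and tail.next chains to the head of the next chunk (or NULL).
	 */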
4741 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4742 if (!map_array) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07004743 pr_warn("Unable to allocate trace enum mapping\n");
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004744 return;
4745 }
4746
4747 mutex_lock(&trace_enum_mutex);
4748
4749 if (!trace_enum_maps)
4750 trace_enum_maps = map_array;
4751 else {
4752 ptr = trace_enum_maps;
4753 for (;;) {
4754 ptr = trace_enum_jmp_to_tail(ptr);
4755 if (!ptr->tail.next)
4756 break;
4757 ptr = ptr->tail.next;
4758
4759 }
4760 ptr->tail.next = map_array;
4761 }
4762 map_array->head.mod = mod;
4763 map_array->head.length = len;
4764 map_array++;
4765
4766 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4767 map_array->map = **map;
4768 map_array++;
4769 }
4770 memset(map_array, 0, sizeof(*map_array));
4771
4772 mutex_unlock(&trace_enum_mutex);
4773}
4774
4775static void trace_create_enum_file(struct dentry *d_tracer)
4776{
4777 trace_create_file("enum_map", 0444, d_tracer,
4778 NULL, &tracing_enum_map_fops);
4779}
4780
4781#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4782static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4783static inline void trace_insert_enum_map_file(struct module *mod,
4784 struct trace_enum_map **start, int len) { }
4785#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4786
4787static void trace_insert_enum_map(struct module *mod,
4788 struct trace_enum_map **start, int len)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04004789{
4790 struct trace_enum_map **map;
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04004791
4792 if (len <= 0)
4793 return;
4794
4795 map = start;
4796
4797 trace_event_enum_update(map, len);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004798
4799 trace_insert_enum_map_file(mod, start, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04004800}
4801
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004802static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004803tracing_set_trace_read(struct file *filp, char __user *ubuf,
4804 size_t cnt, loff_t *ppos)
4805{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004806 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004807 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004808 int r;
4809
4810 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004811 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004812 mutex_unlock(&trace_types_lock);
4813
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004814 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004815}
4816
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004817int tracer_init(struct tracer *t, struct trace_array *tr)
4818{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004819 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004820 return t->init(tr);
4821}
4822
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004823static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004824{
4825 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05004826
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004827 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004828 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004829}
4830
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004831#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09004832/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004833static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4834 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09004835{
4836 int cpu, ret = 0;
4837
4838 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4839 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004840 ret = ring_buffer_resize(trace_buf->buffer,
4841 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004842 if (ret < 0)
4843 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004844 per_cpu_ptr(trace_buf->data, cpu)->entries =
4845 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09004846 }
4847 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004848 ret = ring_buffer_resize(trace_buf->buffer,
4849 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004850 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004851 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4852 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09004853 }
4854
4855 return ret;
4856}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004857#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09004858
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004859static int __tracing_resize_ring_buffer(struct trace_array *tr,
4860 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04004861{
4862 int ret;
4863
4864 /*
4865 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04004866 * we use the size that was given, and we can forget about
4867 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04004868 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05004869 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04004870
Steven Rostedtb382ede62012-10-10 21:44:34 -04004871 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004872 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04004873 return 0;
4874
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004875 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004876 if (ret < 0)
4877 return ret;
4878
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004879#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004880 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4881 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004882 goto out;
4883
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004884 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004885 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004886 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4887 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004888 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04004889 /*
4890 * AARGH! We are left with different
4891 * size max buffer!!!!
4892 * The max buffer is our "snapshot" buffer.
4893 * When a tracer needs a snapshot (one of the
4894 * latency tracers), it swaps the max buffer
4895 * with the saved snap shot. We succeeded to
4896 * update the size of the main buffer, but failed to
4897 * update the size of the max buffer. But when we tried
4898 * to reset the main buffer to the original size, we
4899 * failed there too. This is very unlikely to
4900 * happen, but if it does, warn and kill all
4901 * tracing.
4902 */
Steven Rostedt73c51622009-03-11 13:42:01 -04004903 WARN_ON(1);
4904 tracing_disabled = 1;
4905 }
4906 return ret;
4907 }
4908
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004909 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004910 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004911 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004912 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004913
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004914 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004915#endif /* CONFIG_TRACER_MAX_TRACE */
4916
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004917 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004918 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004919 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004920 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04004921
4922 return ret;
4923}
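/*
 * User-visible entry point (mount point assumed): writes to
 * buffer_size_kb ultimately resize through this path via
 * tracing_resize_ring_buffer(), e.g.
 *
 *	echo 1408 > buffer_size_kb		# resize all CPUs
 *	echo 1408 > per_cpu/cpu0/buffer_size_kb	# just CPU 0
 *
 * where the first form passes cpu == RING_BUFFER_ALL_CPUS.
 */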
4924
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004925static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4926 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004927{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07004928 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004929
4930 mutex_lock(&trace_types_lock);
4931
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004932 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4933		/* make sure this CPU is enabled in the mask */
4934 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4935 ret = -EINVAL;
4936 goto out;
4937 }
4938 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004939
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004940 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004941 if (ret < 0)
4942 ret = -ENOMEM;
4943
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004944out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004945 mutex_unlock(&trace_types_lock);
4946
4947 return ret;
4948}
4949
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004950
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004951/**
4952 * tracing_update_buffers - used by tracing facility to expand ring buffers
4953 *
4954 * To save memory when tracing is never used on a system that has it
4955 * configured in, the ring buffers are set to a minimum size. Once
4956 * a user starts using the tracing facility, they need to grow
4957 * to their default size.
4958 *
4959 * This function is to be called when a tracer is about to be used.
4960 */
4961int tracing_update_buffers(void)
4962{
4963 int ret = 0;
4964
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004965 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004966 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004967 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004968 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004969 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004970
4971 return ret;
4972}

struct trace_option_dentry;

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	create_trace_option_files(tr, t);
}

static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that update_max_tr sees that
		 * current_trace changed to nop_trace, to keep it from
		 * swapping the buffers after we resize them.
		 * update_max_tr is called with interrupts disabled,
		 * so a synchronize_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}

	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
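
/*
 * From user space the tracer is selected through the "current_tracer"
 * tracefs file. A typical session (paths assume tracefs is mounted at
 * /sys/kernel/tracing; adjust if it lives under debugfs instead):
 *
 *	# cat available_tracers
 *	# echo function > current_tracer
 *	# echo nop > current_tracer
 *
 * A write fails with -EBUSY while a reader holds trace_pipe open, and
 * with -EINVAL if the name is unknown or not allowed for the instance.
 */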

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip trailing whitespace */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}

static ssize_t
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}
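
/*
 * Note the unit conversion in the two helpers above: user space reads
 * and writes these files in microseconds, while the kernel stores
 * nanoseconds. Writing "100" therefore stores 100 * 1000 = 100000 in
 * *ptr, and the read side converts back with nsecs_to_usecs().
 */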

static ssize_t
tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
}

static ssize_t
tracing_thresh_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	int ret;

	mutex_lock(&trace_types_lock);
	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
	if (ret < 0)
		goto out;

	if (tr->current_trace->update_thresh) {
		ret = tr->current_trace->update_thresh(tr);
		if (ret < 0)
			goto out;
	}

	ret = cnt;
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}

#endif
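
/*
 * The helpers above back the "tracing_thresh" and "tracing_max_latency"
 * tracefs files. A sketch of driving the latency tracers with them
 * (values in microseconds; file names as documented for ftrace):
 *
 *	# echo 0 > tracing_max_latency		# reset the recorded max
 *	# echo wakeup > current_tracer
 *	# cat tracing_max_latency		# worst latency seen so far
 */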

static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	trace_seq_init(&iter->seq);
	iter->trace = tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->trace_buffer = &tr->trace_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);

	tr->current_trace->ref++;
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	/*
	 * iter->trace points at tr->current_trace, which is not a
	 * private copy, so it must not be freed here.
	 */
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}
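
/*
 * Note the tr->current_trace->ref++ above: as long as at least one
 * trace_pipe reader is open, tracing_set_tracer() refuses to switch
 * tracers (-EBUSY), because the open iterator holds pointers into the
 * current tracer's state.
 */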

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	tr->current_trace->ref--;

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}

static unsigned int
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	struct trace_array *tr = iter->tr;

	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return POLLIN | POLLRDNORM;

	if (tr->trace_flags & TRACE_ITER_BLOCK)
		/* Always select as readable when in blocking mode */
		return POLLIN | POLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}

/* Must be called with iter->mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/*
		 * We stop blocking only once tracing has been disabled
		 * and something has already been read. We still block
		 * if tracing is disabled but nothing was read yet; this
		 * allows a user to cat this file and then enable
		 * tracing. But after something has been read, we return
		 * EOF once tracing is disabled again.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracing_is_on() && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, false);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;
	}

	return 1;
}
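
/*
 * The resulting semantics, seen from user space: "cat trace_pipe"
 * blocks on an empty buffer, streams entries as they arrive, and only
 * hits EOF once tracing is turned off after at least one entry has been
 * consumed. Roughly:
 *
 *	# cat trace_pipe &		# blocks, nothing yet
 *	# echo 1 > tracing_on		# entries start flowing
 *	# echo 0 > tracing_on		# cat drains the rest, then EOF
 */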

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	ssize_t sret;

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of trace coherency: the ring buffer
	 * itself is protected.
	 */
	mutex_lock(&iter->mutex);

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		goto out;

	trace_seq_init(&iter->seq);

	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.seq.len = save_len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq
		 * buffer size, and we should have left via the
		 * partial-line condition above. If we get here, one of
		 * the trace_seq_* functions is not being used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to the user, in spite of
	 * consuming trace entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge	= 0,
	.confirm	= generic_pipe_buf_confirm,
	.release	= generic_pipe_buf_release,
	.steal		= generic_pipe_buf_steal,
	.get		= generic_pipe_buf_get,
};

static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	if (i)
		ret = splice_to_pipe(pipe, &spd);
	else
		ret = 0;
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}

static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}
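
/*
 * Sample output of "buffer_size_kb", matching the sprintf() formats
 * above: a plain size in KB when all per-cpu buffers match, a size plus
 * "(expanded: ...)" while the boot-time minimal buffer has not yet been
 * expanded, and a bare "X" when the per-cpu sizes differ (read the
 * per_cpu/cpu*/buffer_size_kb files for the details in that case).
 */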

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
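
/*
 * Usage sketch: values are taken in KB, and the resize targets all CPUs
 * or one CPU depending on which file instance is written:
 *
 *	# echo 4096 > buffer_size_kb			# all CPUs
 *	# echo 4096 > per_cpu/cpu0/buffer_size_kb	# just cpu0
 */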

static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written; this
	 * function only exists so that "echo" into the file does not
	 * report an error.
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing? */
	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}
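
/*
 * The "free_buffer" file this backs is mainly useful for shrinking the
 * ring buffer to nothing once a capture is done. With the
 * "disable_on_free" option set, closing it also turns tracing off, as a
 * sketch:
 *
 *	# echo 1 > options/disable_on_free
 *	# echo > free_buffer		# tracing stops, buffers shrink to 0
 */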

static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	const char faulted[] = "<faulted>";
	ssize_t written;
	int size;
	int len;

/* Used in tracing_mark_raw_write() as well */
#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */

	/* If the write is shorter than "<faulted>", leave room for that string */
	if (cnt < FAULTED_SIZE)
		size += FAULTED_SIZE - cnt;

	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    irq_flags, preempt_count());
	if (unlikely(!event))
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
	if (len) {
		memcpy(&entry->buf, faulted, FAULTED_SIZE);
		cnt = FAULTED_SIZE;
		written = -EFAULT;
	} else
		written = cnt;
	len = cnt;

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	if (written > 0)
		*fpos += written;

	return written;
}
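
/*
 * User space annotates the trace through the "trace_marker" file;
 * anything written shows up inline with the kernel events, e.g.
 *
 *	# echo "hello from user space" > trace_marker
 *
 * Note that a partial copy from user space is not dropped: the event is
 * still logged, with its payload replaced by "<faulted>".
 */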

/* Limit it for now to 3K (including tag) */
#define RAW_DATA_MAX_SIZE (1024*3)

static ssize_t
tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
		       size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct raw_data_entry *entry;
	const char faulted[] = "<faulted>";
	unsigned long irq_flags;
	ssize_t written;
	int size;
	int len;

#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	/* The marker must at least have a tag id */
	if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt;
	if (cnt < FAULT_SIZE_ID)
		size += FAULT_SIZE_ID - cnt;

	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
					    irq_flags, preempt_count());
	if (!event)
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);

	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
	if (len) {
		entry->id = -1;
		memcpy(&entry->buf, faulted, FAULTED_SIZE);
		written = -EFAULT;
	} else
		written = cnt;

	__buffer_unlock_commit(buffer, event);

	if (written > 0)
		*fpos += written;

	return written;
}
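
/*
 * "trace_marker_raw" takes binary records: a leading int tag followed
 * by payload bytes, submitted in a single write(2). A minimal
 * user-space sketch (the struct layout here is the caller's own
 * convention, not something the kernel defines):
 *
 *	struct my_raw_event {
 *		int id;			consumed as entry->id
 *		char data[16];		opaque payload, shown as a hex dump
 *	} ev = { .id = 42, .data = "payload" };
 *	int fd = open("trace_marker_raw", O_WRONLY);
 *	write(fd, &ev, sizeof(ev));
 */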

static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}
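
/*
 * The clock is chosen through the "trace_clock" file; reading it lists
 * every registered clock with the active one in brackets, for example:
 *
 *	# cat trace_clock
 *	[local] global counter uptime perf ...
 *	# echo global > trace_clock
 *
 * As the comment above explains, switching clocks resets the buffers,
 * so any captured trace is lost.
 */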

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
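
/*
 * The resulting "snapshot" file semantics, matching the switch above:
 *
 *	# echo 1 > snapshot	allocate (if needed) and swap the buffers
 *	# cat snapshot		read the frozen copy
 *	# echo 2 > snapshot	clear the snapshot without freeing it
 *	# echo 0 > snapshot	free the snapshot buffer
 *
 * Any value other than 0 and 1 only clears, and the per-cpu snapshot
 * files accept the swap only with CONFIG_RING_BUFFER_ALLOW_SWAP set.
 */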
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006082
6083static int tracing_snapshot_release(struct inode *inode, struct file *file)
6084{
6085 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006086 int ret;
6087
6088 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006089
6090 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006091 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006092
6093 /* If write only, the seq_file is just a stub */
6094 if (m)
6095 kfree(m->private);
6096 kfree(m);
6097
6098 return 0;
6099}
6100
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006101static int tracing_buffers_open(struct inode *inode, struct file *filp);
6102static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6103 size_t count, loff_t *ppos);
6104static int tracing_buffers_release(struct inode *inode, struct file *file);
6105static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6106 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6107
6108static int snapshot_raw_open(struct inode *inode, struct file *filp)
6109{
6110 struct ftrace_buffer_info *info;
6111 int ret;
6112
6113 ret = tracing_buffers_open(inode, filp);
6114 if (ret < 0)
6115 return ret;
6116
6117 info = filp->private_data;
6118
6119 if (info->iter.trace->use_max_tr) {
6120 tracing_buffers_release(inode, filp);
6121 return -EBUSY;
6122 }
6123
6124 info->iter.snapshot = true;
6125 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6126
6127 return ret;
6128}
6129
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006130#endif /* CONFIG_TRACER_SNAPSHOT */
6131
6132
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006133static const struct file_operations tracing_thresh_fops = {
6134 .open = tracing_open_generic,
6135 .read = tracing_thresh_read,
6136 .write = tracing_thresh_write,
6137 .llseek = generic_file_llseek,
6138};
6139
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04006140#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006141static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006142 .open = tracing_open_generic,
6143 .read = tracing_max_lat_read,
6144 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006145 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006146};
Chen Gange428abb2015-11-10 05:15:15 +08006147#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006148
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006149static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006150 .open = tracing_open_generic,
6151 .read = tracing_set_trace_read,
6152 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006153 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006154};
6155
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006156static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006157 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006158 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006159 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006160 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006161 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006162 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02006163};
6164
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006165static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006166 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02006167 .read = tracing_entries_read,
6168 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006169 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006170 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02006171};
6172
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006173static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006174 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006175 .read = tracing_total_entries_read,
6176 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006177 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006178};
6179
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006180static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006181 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006182 .write = tracing_free_buffer_write,
6183 .release = tracing_free_buffer_release,
6184};
6185
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006186static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006187 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006188 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006189 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006190 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006191};
6192
Steven Rostedtfa32e852016-07-06 15:25:08 -04006193static const struct file_operations tracing_mark_raw_fops = {
6194 .open = tracing_open_generic_tr,
6195 .write = tracing_mark_raw_write,
6196 .llseek = generic_file_llseek,
6197 .release = tracing_release_generic_tr,
6198};
6199
Zhaolei5079f322009-08-25 16:12:56 +08006200static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08006201 .open = tracing_clock_open,
6202 .read = seq_read,
6203 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006204 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08006205 .write = tracing_clock_write,
6206};
6207
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006208#ifdef CONFIG_TRACER_SNAPSHOT
6209static const struct file_operations snapshot_fops = {
6210 .open = tracing_snapshot_open,
6211 .read = seq_read,
6212 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05006213 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006214 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006215};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006216
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006217static const struct file_operations snapshot_raw_fops = {
6218 .open = snapshot_raw_open,
6219 .read = tracing_buffers_read,
6220 .release = tracing_buffers_release,
6221 .splice_read = tracing_buffers_splice_read,
6222 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006223};
6224
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006225#endif /* CONFIG_TRACER_SNAPSHOT */
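/*
 * Illustrative user-space sketch of driving the "snapshot" file whose
 * fops are defined above: writing '1' allocates the spare buffer (if
 * needed) and swaps it with the live buffer, after which the snapshot
 * reads back like "trace". The mount point is an assumption (tracefs
 * at /sys/kernel/tracing; older setups use /sys/kernel/debug/tracing).
 * Kept under #if 0 so it is not built as part of this file.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int take_snapshot(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/tracing/snapshot", O_RDWR);
	if (fd < 0)
		return -1;

	/* "1" == allocate the spare buffer (if needed) and swap it in */
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return -1;
	}

	/* read the frozen buffer back, just like reading "trace" */
	lseek(fd, 0, SEEK_SET);
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return 0;
}
#endif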
6226
Steven Rostedt2cadf912008-12-01 22:20:19 -05006227static int tracing_buffers_open(struct inode *inode, struct file *filp)
6228{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006229 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006230 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006231 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006232
6233 if (tracing_disabled)
6234 return -ENODEV;
6235
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006236 if (trace_array_get(tr) < 0)
6237 return -ENODEV;
6238
Steven Rostedt2cadf912008-12-01 22:20:19 -05006239 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006240 if (!info) {
6241 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006242 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006243 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006244
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006245 mutex_lock(&trace_types_lock);
6246
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006247 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006248 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05006249 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006250 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006251 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006252 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006253 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006254
6255 filp->private_data = info;
6256
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006257 tr->current_trace->ref++;
6258
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006259 mutex_unlock(&trace_types_lock);
6260
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006261 ret = nonseekable_open(inode, filp);
6262 if (ret < 0)
6263 trace_array_put(tr);
6264
6265 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006266}
6267
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006268static unsigned int
6269tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6270{
6271 struct ftrace_buffer_info *info = filp->private_data;
6272 struct trace_iterator *iter = &info->iter;
6273
6274 return trace_poll(iter, filp, poll_table);
6275}
6276
Steven Rostedt2cadf912008-12-01 22:20:19 -05006277static ssize_t
6278tracing_buffers_read(struct file *filp, char __user *ubuf,
6279 size_t count, loff_t *ppos)
6280{
6281 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006282 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006283 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006284 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006285
Steven Rostedt2dc5d122009-03-04 19:10:05 -05006286 if (!count)
6287 return 0;
6288
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006289#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006290 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6291 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006292#endif
6293
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006294 if (!info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006295 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6296 iter->cpu_file);
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006297 if (!info->spare)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006298 return -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006299
Steven Rostedt2cadf912008-12-01 22:20:19 -05006300 /* Do we have previous read data to read? */
6301 if (info->read < PAGE_SIZE)
6302 goto read;
6303
Steven Rostedtb6273442013-02-28 13:44:11 -05006304 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006305 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006306 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006307 &info->spare,
6308 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006309 iter->cpu_file, 0);
6310 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05006311
6312 if (ret < 0) {
6313 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006314 if ((filp->f_flags & O_NONBLOCK))
6315 return -EAGAIN;
6316
Rabin Vincente30f53a2014-11-10 19:46:34 +01006317 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006318 if (ret)
6319 return ret;
6320
Steven Rostedtb6273442013-02-28 13:44:11 -05006321 goto again;
6322 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006323 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05006324 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006325
Steven Rostedt436fc282011-10-14 10:44:25 -04006326 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05006327 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05006328 size = PAGE_SIZE - info->read;
6329 if (size > count)
6330 size = count;
6331
6332 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006333 if (ret == size)
6334 return -EFAULT;
6335
Steven Rostedt2dc5d122009-03-04 19:10:05 -05006336 size -= ret;
6337
Steven Rostedt2cadf912008-12-01 22:20:19 -05006338 *ppos += size;
6339 info->read += size;
6340
6341 return size;
6342}
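/*
 * Illustrative user-space sketch of the read() path implemented above:
 * trace_pipe_raw hands back whole ring-buffer pages in binary form, so
 * a consumer pulls page-sized chunks per call. The path and the
 * 4096-byte page size are assumptions for the sketch.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char page[4096];	/* assumes 4K pages */
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
		  O_RDONLY);
	if (fd < 0)
		return 1;

	/* each successful read returns (at most) one buffer page */
	while ((n = read(fd, page, sizeof(page))) > 0)
		fprintf(stderr, "read %zd bytes of raw page data\n", n);

	close(fd);
	return 0;
}
#endif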
6343
6344static int tracing_buffers_release(struct inode *inode, struct file *file)
6345{
6346 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006347 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006348
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006349 mutex_lock(&trace_types_lock);
6350
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006351 iter->tr->current_trace->ref--;
6352
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006353 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006354
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006355 if (info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006356 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006357 kfree(info);
6358
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006359 mutex_unlock(&trace_types_lock);
6360
Steven Rostedt2cadf912008-12-01 22:20:19 -05006361 return 0;
6362}
6363
6364struct buffer_ref {
6365 struct ring_buffer *buffer;
6366 void *page;
6367 int ref;
6368};
6369
6370static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6371 struct pipe_buffer *buf)
6372{
6373 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6374
6375 if (--ref->ref)
6376 return;
6377
6378 ring_buffer_free_read_page(ref->buffer, ref->page);
6379 kfree(ref);
6380 buf->private = 0;
6381}
6382
Steven Rostedt2cadf912008-12-01 22:20:19 -05006383static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6384 struct pipe_buffer *buf)
6385{
6386 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6387
6388 ref->ref++;
6389}
6390
6391/* Pipe buffer operations for a ring buffer page. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08006392static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05006393 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006394 .confirm = generic_pipe_buf_confirm,
6395 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09006396 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006397 .get = buffer_pipe_buf_get,
6398};
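/*
 * Life cycle of a spliced page: tracing_buffers_splice_read() below
 * takes the initial reference when it allocates the buffer_ref, ->get
 * is called when the pipe duplicates a buffer (e.g. via tee()), and
 * ->release drops a reference, returning the page to the ring buffer
 * once the count hits zero. buffer_spd_release() performs the same
 * drop for pages that never made it into the pipe.
 */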
6399
6400/*
6401 * Callback from splice_to_pipe(), used to release pages left at the
6402 * end of the spd when we errored out while filling the pipe.
6403 */
6404static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6405{
6406 struct buffer_ref *ref =
6407 (struct buffer_ref *)spd->partial[i].private;
6408
6409 if (--ref->ref)
6410 return;
6411
6412 ring_buffer_free_read_page(ref->buffer, ref->page);
6413 kfree(ref);
6414 spd->partial[i].private = 0;
6415}
6416
6417static ssize_t
6418tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6419 struct pipe_inode_info *pipe, size_t len,
6420 unsigned int flags)
6421{
6422 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006423 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02006424 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6425 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05006426 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02006427 .pages = pages_def,
6428 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02006429 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006430 .flags = flags,
6431 .ops = &buffer_pipe_buf_ops,
6432 .spd_release = buffer_spd_release,
6433 };
6434 struct buffer_ref *ref;
Steven Rostedt93459c62009-04-29 00:23:13 -04006435 int entries, size, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01006436 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006437
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006438#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006439 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6440 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006441#endif
6442
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006443 if (*ppos & (PAGE_SIZE - 1))
6444 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08006445
6446 if (len & (PAGE_SIZE - 1)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006447 if (len < PAGE_SIZE)
6448 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08006449 len &= PAGE_MASK;
6450 }
6451
Al Viro1ae22932016-09-17 18:31:46 -04006452 if (splice_grow_spd(pipe, &spd))
6453 return -ENOMEM;
6454
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006455 again:
6456 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006457 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04006458
Al Viroa786c062014-04-11 12:01:03 -04006459 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05006460 struct page *page;
6461 int r;
6462
6463 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01006464 if (!ref) {
6465 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006466 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01006467 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006468
Steven Rostedt7267fa62009-04-29 00:16:21 -04006469 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006470 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006471 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006472 if (!ref->page) {
Rabin Vincent07906da2014-11-06 22:26:07 +01006473 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006474 kfree(ref);
6475 break;
6476 }
6477
6478 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006479 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006480 if (r < 0) {
Vaibhav Nagarnaik7ea59062011-05-03 17:56:42 -07006481 ring_buffer_free_read_page(ref->buffer, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006482 kfree(ref);
6483 break;
6484 }
6485
6486		/*
6487		 * Zero out any leftover data; this page is going
6488		 * to user land.
6489		 */
6490 size = ring_buffer_page_len(ref->page);
6491 if (size < PAGE_SIZE)
6492 memset(ref->page + size, 0, PAGE_SIZE - size);
6493
6494 page = virt_to_page(ref->page);
6495
6496 spd.pages[i] = page;
6497 spd.partial[i].len = PAGE_SIZE;
6498 spd.partial[i].offset = 0;
6499 spd.partial[i].private = (unsigned long)ref;
6500 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08006501 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04006502
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006503 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006504 }
6505
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006506 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006507 spd.nr_pages = i;
6508
6509 /* did we read anything? */
6510 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01006511 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04006512 goto out;
Rabin Vincent07906da2014-11-06 22:26:07 +01006513
Al Viro1ae22932016-09-17 18:31:46 -04006514 ret = -EAGAIN;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006515 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
Al Viro1ae22932016-09-17 18:31:46 -04006516 goto out;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006517
Rabin Vincente30f53a2014-11-10 19:46:34 +01006518 ret = wait_on_pipe(iter, true);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006519 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04006520 goto out;
Rabin Vincente30f53a2014-11-10 19:46:34 +01006521
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006522 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006523 }
6524
6525 ret = splice_to_pipe(pipe, &spd);
Al Viro1ae22932016-09-17 18:31:46 -04006526out:
Eric Dumazet047fe362012-06-12 15:24:40 +02006527 splice_shrink_spd(&spd);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006528
Steven Rostedt2cadf912008-12-01 22:20:19 -05006529 return ret;
6530}
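/*
 * Illustrative user-space sketch of the zero-copy path this function
 * implements: splice() moves ring-buffer pages into a pipe and on to a
 * file without copying them through user memory. Paths and the 4096
 * chunk size are assumptions for the sketch.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int pfd[2];
	int in, out;
	ssize_t n;

	in = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
		  O_RDONLY);
	out = open("trace_cpu0.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (in < 0 || out < 0 || pipe(pfd) < 0)
		return 1;

	for (;;) {
		/* ring buffer -> pipe (no user-space copy) */
		n = splice(in, NULL, pfd[1], NULL, 4096, SPLICE_F_MOVE);
		if (n <= 0)
			break;
		/* pipe -> file */
		if (splice(pfd[0], NULL, out, NULL, n, SPLICE_F_MOVE) < 0)
			break;
	}
	return 0;
}
#endif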
6531
6532static const struct file_operations tracing_buffers_fops = {
6533 .open = tracing_buffers_open,
6534 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006535 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006536 .release = tracing_buffers_release,
6537 .splice_read = tracing_buffers_splice_read,
6538 .llseek = no_llseek,
6539};
6540
Steven Rostedtc8d77182009-04-29 18:03:45 -04006541static ssize_t
6542tracing_stats_read(struct file *filp, char __user *ubuf,
6543 size_t count, loff_t *ppos)
6544{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006545 struct inode *inode = file_inode(filp);
6546 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006547 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006548 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006549 struct trace_seq *s;
6550 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006551 unsigned long long t;
6552 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04006553
Li Zefane4f2d102009-06-15 10:57:28 +08006554 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006555 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01006556 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04006557
6558 trace_seq_init(s);
6559
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006560 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006561 trace_seq_printf(s, "entries: %ld\n", cnt);
6562
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006563 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006564 trace_seq_printf(s, "overrun: %ld\n", cnt);
6565
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006566 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006567 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
6568
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006569 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006570 trace_seq_printf(s, "bytes: %ld\n", cnt);
6571
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09006572 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006573 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006574 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006575 usec_rem = do_div(t, USEC_PER_SEC);
6576 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
6577 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006578
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006579 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006580 usec_rem = do_div(t, USEC_PER_SEC);
6581 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
6582 } else {
6583 /* counter or tsc mode for trace_clock */
6584 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006585 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006586
6587 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006588 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006589 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006590
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006591 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07006592 trace_seq_printf(s, "dropped events: %ld\n", cnt);
6593
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006594 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05006595 trace_seq_printf(s, "read events: %ld\n", cnt);
6596
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006597 count = simple_read_from_buffer(ubuf, count, ppos,
6598 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04006599
6600 kfree(s);
6601
6602 return count;
6603}
6604
6605static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006606 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04006607 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006608 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006609 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04006610};
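/*
 * Illustrative sketch of consuming the per-cpu "stats" file exposed
 * above. Each line is a "name: value" pair (entries, overrun, commit
 * overrun, bytes, oldest/now timestamps, dropped events, read events),
 * so a plain read-and-print loop suffices. Path is an assumption.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[1024];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/tracing/per_cpu/cpu0/stats", O_RDONLY);
	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}
#endif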
6611
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006612#ifdef CONFIG_DYNAMIC_FTRACE
6613
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006614int __weak ftrace_arch_read_dyn_info(char *buf, int size)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006615{
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006616 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006617}
6618
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006619static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006620tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006621 size_t cnt, loff_t *ppos)
6622{
Steven Rostedta26a2a22008-10-31 00:03:22 -04006623 static char ftrace_dyn_info_buffer[1024];
6624 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006625 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006626 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04006627 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006628 int r;
6629
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006630 mutex_lock(&dyn_info_mutex);
6631 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006632
Steven Rostedta26a2a22008-10-31 00:03:22 -04006633 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006634 buf[r++] = '\n';
6635
6636 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6637
6638 mutex_unlock(&dyn_info_mutex);
6639
6640 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006641}
6642
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006643static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006644 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006645 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006646 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006647};
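/*
 * Sketch of how an architecture might override the __weak
 * ftrace_arch_read_dyn_info() hook above to append arch-specific text
 * to the dyn info output: fill buf and return the number of bytes
 * written. This is an illustration only; the counter name below is
 * hypothetical and not taken from any real architecture.
 */
#if 0
int ftrace_arch_read_dyn_info(char *buf, int size)
{
	/* arch_nop_patch_count is a hypothetical arch-private counter */
	return snprintf(buf, size, "patched call sites: %d",
			arch_nop_patch_count);
}
#endif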
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006648#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006649
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006650#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6651static void
6652ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006653{
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006654 tracing_snapshot();
6655}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006656
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006657static void
6658ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
6659{
6660	unsigned long *count = (unsigned long *)data;
6661
6662 if (!*count)
6663 return;
6664
6665 if (*count != -1)
6666 (*count)--;
6667
6668 tracing_snapshot();
6669}
6670
6671static int
6672ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
6673 struct ftrace_probe_ops *ops, void *data)
6674{
6675 long count = (long)data;
6676
6677 seq_printf(m, "%ps:", (void *)ip);
6678
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01006679 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006680
6681 if (count == -1)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01006682 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006683 else
6684 seq_printf(m, ":count=%ld\n", count);
6685
6686 return 0;
6687}
6688
6689static struct ftrace_probe_ops snapshot_probe_ops = {
6690 .func = ftrace_snapshot,
6691 .print = ftrace_snapshot_print,
6692};
6693
6694static struct ftrace_probe_ops snapshot_count_probe_ops = {
6695 .func = ftrace_count_snapshot,
6696 .print = ftrace_snapshot_print,
6697};
6698
6699static int
6700ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
6701 char *glob, char *cmd, char *param, int enable)
6702{
6703 struct ftrace_probe_ops *ops;
6704 void *count = (void *)-1;
6705 char *number;
6706 int ret;
6707
6708 /* hash funcs only work with set_ftrace_filter */
6709 if (!enable)
6710 return -EINVAL;
6711
6712 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6713
6714 if (glob[0] == '!') {
6715 unregister_ftrace_function_probe_func(glob+1, ops);
6716 return 0;
6717 }
6718
6719 if (!param)
6720 goto out_reg;
6721
6722 number = strsep(&param, ":");
6723
6724 if (!strlen(number))
6725 goto out_reg;
6726
6727 /*
6728 * We use the callback data field (which is a pointer)
6729 * as our counter.
6730 */
6731 ret = kstrtoul(number, 0, (unsigned long *)&count);
6732 if (ret)
6733 return ret;
6734
6735 out_reg:
6736 ret = register_ftrace_function_probe(glob, ops, count);
6737
6738 if (ret >= 0)
6739 alloc_snapshot(&global_trace);
6740
6741 return ret < 0 ? ret : 0;
6742}
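/*
 * Illustrative sketch of triggering the "snapshot" command parsed
 * above from user space: writing "<function>:snapshot[:count]" into
 * set_ftrace_filter registers the probe, and prefixing the glob with
 * '!' removes it. The function name and mount path below are
 * assumptions for the sketch.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int arm_snapshot_on(const char *func)
{
	char cmd[128];
	int fd, len;

	fd = open("/sys/kernel/tracing/set_ftrace_filter", O_WRONLY);
	if (fd < 0)
		return -1;

	/* take at most 3 snapshots when func is hit */
	len = snprintf(cmd, sizeof(cmd), "%s:snapshot:3", func);
	if (write(fd, cmd, len) != len) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
#endif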
6743
6744static struct ftrace_func_command ftrace_snapshot_cmd = {
6745 .name = "snapshot",
6746 .func = ftrace_trace_snapshot_callback,
6747};
6748
Tom Zanussi38de93a2013-10-24 08:34:18 -05006749static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006750{
6751 return register_ftrace_command(&ftrace_snapshot_cmd);
6752}
6753#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05006754static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006755#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006756
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006757static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006758{
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006759 if (WARN_ON(!tr->dir))
6760 return ERR_PTR(-ENODEV);
6761
6762 /* Top directory uses NULL as the parent */
6763 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6764 return NULL;
6765
6766 /* All sub buffers have a descriptor */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006767 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006768}
6769
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006770static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6771{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006772 struct dentry *d_tracer;
6773
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006774 if (tr->percpu_dir)
6775 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006776
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006777 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05006778 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006779 return NULL;
6780
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006781 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006782
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006783 WARN_ONCE(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006784 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006785
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006786 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006787}
6788
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006789static struct dentry *
6790trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6791 void *data, long cpu, const struct file_operations *fops)
6792{
6793 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6794
6795 if (ret) /* See tracing_get_cpu() */
David Howells7682c912015-03-17 22:26:16 +00006796 d_inode(ret)->i_cdev = (void *)(cpu + 1);
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006797 return ret;
6798}
6799
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006800static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006801tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006802{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006803 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006804 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04006805 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006806
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09006807 if (!d_percpu)
6808 return;
6809
Steven Rostedtdd49a382010-10-20 21:51:26 -04006810 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006811 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01006812 if (!d_cpu) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07006813 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01006814 return;
6815 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006816
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01006817 /* per cpu trace_pipe */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006818 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02006819 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006820
6821 /* per cpu trace */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006822 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006823 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04006824
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006825 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006826 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006827
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006828 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006829 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006830
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006831 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006832 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006833
6834#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006835 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006836 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006837
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006838 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006839 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006840#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006841}
6842
Steven Rostedt60a11772008-05-12 21:20:44 +02006843#ifdef CONFIG_FTRACE_SELFTEST
6844/* Let selftest have access to static functions in this file */
6845#include "trace_selftest.c"
6846#endif
6847
Steven Rostedt577b7852009-02-26 23:43:05 -05006848static ssize_t
6849trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6850 loff_t *ppos)
6851{
6852 struct trace_option_dentry *topt = filp->private_data;
6853 char *buf;
6854
6855 if (topt->flags->val & topt->opt->bit)
6856 buf = "1\n";
6857 else
6858 buf = "0\n";
6859
6860 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6861}
6862
6863static ssize_t
6864trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6865 loff_t *ppos)
6866{
6867 struct trace_option_dentry *topt = filp->private_data;
6868 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05006869 int ret;
6870
Peter Huewe22fe9b52011-06-07 21:58:27 +02006871 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6872 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05006873 return ret;
6874
Li Zefan8d18eaa2009-12-08 11:17:06 +08006875 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05006876 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08006877
6878 if (!!(topt->flags->val & topt->opt->bit) != val) {
6879 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05006880 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05006881 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08006882 mutex_unlock(&trace_types_lock);
6883 if (ret)
6884 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05006885 }
6886
6887 *ppos += cnt;
6888
6889 return cnt;
6890}
6891
6892
6893static const struct file_operations trace_options_fops = {
6894 .open = tracing_open_generic,
6895 .read = trace_options_read,
6896 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006897 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05006898};
6899
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006900/*
6901 * In order to pass in both the trace_array descriptor as well as the index
6902 * to the flag that the trace option file represents, the trace_array
6903 * has a character array of trace_flags_index[], which holds the index
6904 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6905 * The address of this character array is passed to the flag option file
6906 * read/write callbacks.
6907 *
6908 * In order to extract both the index and the trace_array descriptor,
6909 * get_tr_index() uses the following algorithm.
6910 *
6911 * idx = *ptr;
6912 *
6913 * The pointer itself holds the address of one index entry, and that
6914 * entry's value is its own position (remember index[1] == 1).
6915 *
6916 * To get the trace_array descriptor, subtract that index from the
6917 * pointer, which yields the start of the index array itself.
6918 *
6919 * ptr - idx == &index[0]
6920 *
6921 * Then a simple container_of() from that pointer gets us to the
6922 * trace_array descriptor.
6923 */
6924static void get_tr_index(void *data, struct trace_array **ptr,
6925 unsigned int *pindex)
6926{
6927 *pindex = *(unsigned char *)data;
6928
6929 *ptr = container_of(data - *pindex, struct trace_array,
6930 trace_flags_index);
6931}
6932
Steven Rostedta8259072009-02-26 22:19:12 -05006933static ssize_t
6934trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6935 loff_t *ppos)
6936{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006937 void *tr_index = filp->private_data;
6938 struct trace_array *tr;
6939 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05006940 char *buf;
6941
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006942 get_tr_index(tr_index, &tr, &index);
6943
6944 if (tr->trace_flags & (1 << index))
Steven Rostedta8259072009-02-26 22:19:12 -05006945 buf = "1\n";
6946 else
6947 buf = "0\n";
6948
6949 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6950}
6951
6952static ssize_t
6953trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6954 loff_t *ppos)
6955{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006956 void *tr_index = filp->private_data;
6957 struct trace_array *tr;
6958 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05006959 unsigned long val;
6960 int ret;
6961
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006962 get_tr_index(tr_index, &tr, &index);
6963
Peter Huewe22fe9b52011-06-07 21:58:27 +02006964 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6965 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05006966 return ret;
6967
Zhaoleif2d84b62009-08-07 18:55:48 +08006968 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05006969 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04006970
6971 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006972 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04006973 mutex_unlock(&trace_types_lock);
Steven Rostedta8259072009-02-26 22:19:12 -05006974
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04006975 if (ret < 0)
6976 return ret;
6977
Steven Rostedta8259072009-02-26 22:19:12 -05006978 *ppos += cnt;
6979
6980 return cnt;
6981}
6982
Steven Rostedta8259072009-02-26 22:19:12 -05006983static const struct file_operations trace_options_core_fops = {
6984 .open = tracing_open_generic,
6985 .read = trace_options_core_read,
6986 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006987 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05006988};
6989
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006990struct dentry *trace_create_file(const char *name,
Al Virof4ae40a62011-07-24 04:33:43 -04006991 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006992 struct dentry *parent,
6993 void *data,
6994 const struct file_operations *fops)
6995{
6996 struct dentry *ret;
6997
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006998 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006999 if (!ret)
Joe Perchesa395d6a2016-03-22 14:28:09 -07007000 pr_warn("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007001
7002 return ret;
7003}
7004
7005
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007006static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05007007{
7008 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05007009
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007010 if (tr->options)
7011 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05007012
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007013 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05007014 if (IS_ERR(d_tracer))
Steven Rostedta8259072009-02-26 22:19:12 -05007015 return NULL;
7016
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007017 tr->options = tracefs_create_dir("options", d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007018 if (!tr->options) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07007019 pr_warn("Could not create tracefs directory 'options'\n");
Steven Rostedta8259072009-02-26 22:19:12 -05007020 return NULL;
7021 }
7022
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007023 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05007024}
7025
Steven Rostedt577b7852009-02-26 23:43:05 -05007026static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007027create_trace_option_file(struct trace_array *tr,
7028 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05007029 struct tracer_flags *flags,
7030 struct tracer_opt *opt)
7031{
7032 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05007033
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007034 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05007035 if (!t_options)
7036 return;
7037
7038 topt->flags = flags;
7039 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007040 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05007041
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007042 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05007043 &trace_options_fops);
7044
Steven Rostedt577b7852009-02-26 23:43:05 -05007045}
7046
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007047static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007048create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05007049{
7050 struct trace_option_dentry *topts;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007051 struct trace_options *tr_topts;
Steven Rostedt577b7852009-02-26 23:43:05 -05007052 struct tracer_flags *flags;
7053 struct tracer_opt *opts;
7054 int cnt;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007055 int i;
Steven Rostedt577b7852009-02-26 23:43:05 -05007056
7057 if (!tracer)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007058 return;
Steven Rostedt577b7852009-02-26 23:43:05 -05007059
7060 flags = tracer->flags;
7061
7062 if (!flags || !flags->opts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007063 return;
7064
7065 /*
7066 * If this is an instance, only create flags for tracers
7067 * the instance may have.
7068 */
7069 if (!trace_ok_for_array(tracer, tr))
7070 return;
7071
7072 for (i = 0; i < tr->nr_topts; i++) {
Chunyu Hud39cdd22016-03-08 21:37:01 +08007073		/* Make sure there are no duplicate flags. */
7074 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007075 return;
7076 }
Steven Rostedt577b7852009-02-26 23:43:05 -05007077
7078 opts = flags->opts;
7079
7080 for (cnt = 0; opts[cnt].name; cnt++)
7081 ;
7082
Steven Rostedt0cfe8242009-02-27 10:51:10 -05007083 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05007084 if (!topts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007085 return;
7086
7087 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7088 GFP_KERNEL);
7089 if (!tr_topts) {
7090 kfree(topts);
7091 return;
7092 }
7093
7094 tr->topts = tr_topts;
7095 tr->topts[tr->nr_topts].tracer = tracer;
7096 tr->topts[tr->nr_topts].topts = topts;
7097 tr->nr_topts++;
Steven Rostedt577b7852009-02-26 23:43:05 -05007098
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04007099 for (cnt = 0; opts[cnt].name; cnt++) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007100 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05007101 &opts[cnt]);
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04007102 WARN_ONCE(topts[cnt].entry == NULL,
7103 "Failed to create trace option: %s",
7104 opts[cnt].name);
7105 }
Steven Rostedt577b7852009-02-26 23:43:05 -05007106}
7107
Steven Rostedta8259072009-02-26 22:19:12 -05007108static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007109create_trace_option_core_file(struct trace_array *tr,
7110 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05007111{
7112 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05007113
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007114 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05007115 if (!t_options)
7116 return NULL;
7117
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007118 return trace_create_file(option, 0644, t_options,
7119 (void *)&tr->trace_flags_index[index],
7120 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05007121}
7122
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007123static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05007124{
7125 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007126 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05007127 int i;
7128
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007129 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05007130 if (!t_options)
7131 return;
7132
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007133 for (i = 0; trace_options[i]; i++) {
7134 if (top_level ||
7135 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7136 create_trace_option_core_file(tr, trace_options[i], i);
7137 }
Steven Rostedta8259072009-02-26 22:19:12 -05007138}
7139
Steven Rostedt499e5472012-02-22 15:50:28 -05007140static ssize_t
7141rb_simple_read(struct file *filp, char __user *ubuf,
7142 size_t cnt, loff_t *ppos)
7143{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04007144 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05007145 char buf[64];
7146 int r;
7147
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04007148 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05007149 r = sprintf(buf, "%d\n", r);
7150
7151 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7152}
7153
7154static ssize_t
7155rb_simple_write(struct file *filp, const char __user *ubuf,
7156 size_t cnt, loff_t *ppos)
7157{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04007158 struct trace_array *tr = filp->private_data;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007159 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05007160 unsigned long val;
7161 int ret;
7162
7163 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7164 if (ret)
7165 return ret;
7166
7167 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05007168 mutex_lock(&trace_types_lock);
7169 if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04007170 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007171 if (tr->current_trace->start)
7172 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05007173 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04007174 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007175 if (tr->current_trace->stop)
7176 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05007177 }
7178 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05007179 }
7180
7181 (*ppos)++;
7182
7183 return cnt;
7184}
7185
7186static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007187 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05007188 .read = rb_simple_read,
7189 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007190 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05007191 .llseek = default_llseek,
7192};
7193
Steven Rostedt277ba042012-08-03 16:10:49 -04007194struct dentry *trace_instance_dir;
7195
7196static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007197init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
Steven Rostedt277ba042012-08-03 16:10:49 -04007198
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007199static int
7200allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04007201{
7202 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007203
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007204 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007205
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05007206 buf->tr = tr;
7207
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007208 buf->buffer = ring_buffer_alloc(size, rb_flags);
7209 if (!buf->buffer)
7210 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007211
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007212 buf->data = alloc_percpu(struct trace_array_cpu);
7213 if (!buf->data) {
7214 ring_buffer_free(buf->buffer);
7215 return -ENOMEM;
7216 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007217
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007218 /* Allocate the first page for all buffers */
7219 set_buffer_entries(&tr->trace_buffer,
7220 ring_buffer_size(tr->trace_buffer.buffer, 0));
7221
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007222 return 0;
7223}
7224
7225static int allocate_trace_buffers(struct trace_array *tr, int size)
7226{
7227 int ret;
7228
7229 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7230 if (ret)
7231 return ret;
7232
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007233#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007234 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7235 allocate_snapshot ? size : 1);
7236 if (WARN_ON(ret)) {
7237 ring_buffer_free(tr->trace_buffer.buffer);
7238 free_percpu(tr->trace_buffer.data);
7239 return -ENOMEM;
7240 }
7241 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007242
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007243 /*
7244 * Only the top level trace array gets its snapshot allocated
7245 * from the kernel command line.
7246 */
7247 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007248#endif
7249 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007250}
7251
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04007252static void free_trace_buffer(struct trace_buffer *buf)
7253{
7254 if (buf->buffer) {
7255 ring_buffer_free(buf->buffer);
7256 buf->buffer = NULL;
7257 free_percpu(buf->data);
7258 buf->data = NULL;
7259 }
7260}
7261
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007262static void free_trace_buffers(struct trace_array *tr)
7263{
7264 if (!tr)
7265 return;
7266
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04007267 free_trace_buffer(&tr->trace_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007268
7269#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04007270 free_trace_buffer(&tr->max_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007271#endif
7272}
7273
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007274static void init_trace_flags_index(struct trace_array *tr)
7275{
7276 int i;
7277
7278 /* Used by the trace options files */
7279 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7280 tr->trace_flags_index[i] = i;
7281}
7282
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007283static void __update_tracer_options(struct trace_array *tr)
7284{
7285 struct tracer *t;
7286
7287 for (t = trace_types; t; t = t->next)
7288 add_tracer_options(tr, t);
7289}
7290
7291static void update_tracer_options(struct trace_array *tr)
7292{
7293 mutex_lock(&trace_types_lock);
7294 __update_tracer_options(tr);
7295 mutex_unlock(&trace_types_lock);
7296}
7297
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05007298static int instance_mkdir(const char *name)
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007299{
Steven Rostedt277ba042012-08-03 16:10:49 -04007300 struct trace_array *tr;
7301 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04007302
7303 mutex_lock(&trace_types_lock);
7304
7305 ret = -EEXIST;
7306 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7307 if (tr->name && strcmp(tr->name, name) == 0)
7308 goto out_unlock;
7309 }
7310
7311 ret = -ENOMEM;
7312 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7313 if (!tr)
7314 goto out_unlock;
7315
7316 tr->name = kstrdup(name, GFP_KERNEL);
7317 if (!tr->name)
7318 goto out_free_tr;
7319
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007320 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7321 goto out_free_tr;
7322
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04007323 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007324
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007325 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7326
Steven Rostedt277ba042012-08-03 16:10:49 -04007327 raw_spin_lock_init(&tr->start_lock);
7328
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05007329 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7330
Steven Rostedt277ba042012-08-03 16:10:49 -04007331 tr->current_trace = &nop_trace;
7332
7333 INIT_LIST_HEAD(&tr->systems);
7334 INIT_LIST_HEAD(&tr->events);
7335
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007336 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04007337 goto out_free_tr;
7338
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007339 tr->dir = tracefs_create_dir(name, trace_instance_dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04007340 if (!tr->dir)
7341 goto out_free_tr;
7342
7343 ret = event_trace_add_tracer(tr->dir, tr);
Alexander Z Lam609e85a2013-07-10 17:34:34 -07007344 if (ret) {
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007345 tracefs_remove_recursive(tr->dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04007346 goto out_free_tr;
Alexander Z Lam609e85a2013-07-10 17:34:34 -07007347 }
Steven Rostedt277ba042012-08-03 16:10:49 -04007348
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007349 init_tracer_tracefs(tr, tr->dir);
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007350 init_trace_flags_index(tr);
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007351 __update_tracer_options(tr);
Steven Rostedt277ba042012-08-03 16:10:49 -04007352
7353 list_add(&tr->list, &ftrace_trace_arrays);
7354
7355 mutex_unlock(&trace_types_lock);
7356
7357 return 0;
7358
7359 out_free_tr:
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007360 free_trace_buffers(tr);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007361 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04007362 kfree(tr->name);
7363 kfree(tr);
7364
7365 out_unlock:
7366 mutex_unlock(&trace_types_lock);
7367
7368 return ret;
7369
7370}
7371
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05007372static int instance_rmdir(const char *name)
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007373{
7374 struct trace_array *tr;
7375 int found = 0;
7376 int ret;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007377 int i;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007378
7379 mutex_lock(&trace_types_lock);
7380
7381 ret = -ENODEV;
7382 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7383 if (tr->name && strcmp(tr->name, name) == 0) {
7384 found = 1;
7385 break;
7386 }
7387 }
7388 if (!found)
7389 goto out_unlock;
7390
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007391 ret = -EBUSY;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007392 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007393 goto out_unlock;
7394
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007395 list_del(&tr->list);
7396
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04007397 /* Disable all the flags that were enabled coming in */
7398 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7399 if ((1 << i) & ZEROED_TRACE_FLAGS)
7400 set_tracer_flag(tr, 1 << i, 0);
7401 }
7402
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05007403 tracing_set_nop(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007404 event_trace_del_tracer(tr);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05007405 ftrace_destroy_function_files(tr);
Jiaxing Wang681a4a22015-10-18 19:58:08 +08007406 tracefs_remove_recursive(tr->dir);
Steven Rostedt (Red Hat)a9fcaaa2014-06-06 23:17:28 -04007407 free_trace_buffers(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007408
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007409 for (i = 0; i < tr->nr_topts; i++) {
7410 kfree(tr->topts[i].topts);
7411 }
7412 kfree(tr->topts);
7413
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007414 kfree(tr->name);
7415 kfree(tr);
7416
7417 ret = 0;
7418
7419 out_unlock:
7420 mutex_unlock(&trace_types_lock);
7421
7422 return ret;
7423}
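/*
 * Illustrative sketch of exercising the mkdir/rmdir hooks above from
 * user space: creating a directory under instances/ builds a complete
 * new trace_array, and removing it tears the instance down (rmdir
 * fails with EBUSY while the instance is in use). Path is an
 * assumption.
 */
#if 0
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	const char *inst = "/sys/kernel/tracing/instances/demo";

	if (mkdir(inst, 0755) < 0) {	/* calls instance_mkdir("demo") */
		perror("mkdir");
		return 1;
	}
	/* ... use the instance's trace, trace_pipe, events/ ... */
	if (rmdir(inst) < 0) {		/* calls instance_rmdir("demo") */
		perror("rmdir");
		return 1;
	}
	return 0;
}
#endif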
7424
Steven Rostedt277ba042012-08-03 16:10:49 -04007425static __init void create_trace_instances(struct dentry *d_tracer)
7426{
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05007427 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7428 instance_mkdir,
7429 instance_rmdir);
Steven Rostedt277ba042012-08-03 16:10:49 -04007430 if (WARN_ON(!trace_instance_dir))
7431 return;
Steven Rostedt277ba042012-08-03 16:10:49 -04007432}
7433
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007434static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007435init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007436{
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05007437 int cpu;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007438
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05007439 trace_create_file("available_tracers", 0444, d_tracer,
7440 tr, &show_traces_fops);
7441
7442 trace_create_file("current_tracer", 0644, d_tracer,
7443 tr, &set_tracer_fops);
7444
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007445 trace_create_file("tracing_cpumask", 0644, d_tracer,
7446 tr, &tracing_cpumask_fops);
7447
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007448 trace_create_file("trace_options", 0644, d_tracer,
7449 tr, &tracing_iter_fops);
7450
7451 trace_create_file("trace", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007452 tr, &tracing_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007453
7454 trace_create_file("trace_pipe", 0444, d_tracer,
Oleg Nesterov15544202013-07-23 17:25:57 +02007455 tr, &tracing_pipe_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007456
7457 trace_create_file("buffer_size_kb", 0644, d_tracer,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007458 tr, &tracing_entries_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007459
7460 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
7461 tr, &tracing_total_entries_fops);
7462
Wang YanQing238ae932013-05-26 16:52:01 +08007463 trace_create_file("free_buffer", 0200, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007464 tr, &tracing_free_buffer_fops);
7465
7466 trace_create_file("trace_marker", 0220, d_tracer,
7467 tr, &tracing_mark_fops);
7468
Steven Rostedtfa32e852016-07-06 15:25:08 -04007469 trace_create_file("trace_marker_raw", 0220, d_tracer,
7470 tr, &tracing_mark_raw_fops);
7471
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007472 trace_create_file("trace_clock", 0644, d_tracer, tr,
7473 &trace_clock_fops);
7474
7475 trace_create_file("tracing_on", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007476 tr, &rb_simple_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05007477
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007478 create_trace_options_dir(tr);
7479
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04007480#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05007481 trace_create_file("tracing_max_latency", 0644, d_tracer,
7482 &tr->max_latency, &tracing_max_lat_fops);
7483#endif
7484
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05007485 if (ftrace_create_function_files(tr, d_tracer))
7486 WARN(1, "Could not allocate function filter files");
7487
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05007488#ifdef CONFIG_TRACER_SNAPSHOT
7489 trace_create_file("snapshot", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007490 tr, &snapshot_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05007491#endif
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05007492
7493 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007494 tracing_init_tracefs_percpu(tr, cpu);
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05007495
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04007496 ftrace_init_tracefs(tr, d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007497}
7498
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007499static struct vfsmount *trace_automount(void *ignore)
7500{
7501 struct vfsmount *mnt;
7502 struct file_system_type *type;
7503
7504 /*
7505 * To maintain backward compatibility for tools that mount
7506 * debugfs to get to the tracing facility, tracefs is automatically
7507 * mounted to the debugfs/tracing directory.
7508 */
7509 type = get_fs_type("tracefs");
7510 if (!type)
7511 return NULL;
7512 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
7513 put_filesystem(type);
7514 if (IS_ERR(mnt))
7515 return NULL;
7516 mntget(mnt);
7517
7518 return mnt;
7519}
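/*
 * Illustrative effect of the automount above (example command, not part
 * of this file): a legacy tool that only knows about debugfs can still
 * run
 *
 *   cat /sys/kernel/debug/tracing/trace
 *
 * and the tracefs mount created here is what services the access.
 */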
7520
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007521/**
7522 * tracing_init_dentry - initialize top level trace array
7523 *
7524 * This is called when creating files or directories in the tracing
7525 * directory. It is called via fs_initcall() by any of the boot up code
 7526 * directory. It is called via fs_initcall() from the boot-up code
 7527 * and returns the dentry of the top level tracing directory.
7528struct dentry *tracing_init_dentry(void)
7529{
7530 struct trace_array *tr = &global_trace;
7531
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007532 /* The top level trace array uses NULL as parent */
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007533 if (tr->dir)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007534 return NULL;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007535
Jiaxing Wang8b129192015-11-06 16:04:16 +08007536 if (WARN_ON(!tracefs_initialized()) ||
7537 (IS_ENABLED(CONFIG_DEBUG_FS) &&
7538 WARN_ON(!debugfs_initialized())))
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007539 return ERR_PTR(-ENODEV);
7540
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007541 /*
7542 * As there may still be users that expect the tracing
7543 * files to exist in debugfs/tracing, we must automount
7544 * the tracefs file system there, so older tools still
 7545 * work with the newer kernel.
7546 */
7547 tr->dir = debugfs_create_automount("tracing", NULL,
7548 trace_automount, NULL);
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007549 if (!tr->dir) {
7550 pr_warn_once("Could not create debugfs directory 'tracing'\n");
7551 return ERR_PTR(-ENOMEM);
7552 }
7553
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007554 return NULL;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007555}
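/*
 * A minimal sketch of the expected caller pattern; the name and fops
 * below are hypothetical, and tracer_init_tracefs() further down is the
 * real in-file user:
 *
 *   static __init int my_tracer_init(void)
 *   {
 *           struct dentry *d_tracer = tracing_init_dentry();
 *
 *           if (IS_ERR(d_tracer))
 *                   return 0;
 *
 *           trace_create_file("my_stats", 0444, d_tracer,
 *                             NULL, &my_stats_fops);
 *           return 0;
 *   }
 *   fs_initcall(my_tracer_init);
 */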
7556
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04007557extern struct trace_enum_map *__start_ftrace_enum_maps[];
7558extern struct trace_enum_map *__stop_ftrace_enum_maps[];
7559
7560static void __init trace_enum_init(void)
7561{
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007562 int len;
7563
7564 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007565 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04007566}
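/*
 * The maps inserted above are emitted at build time. An event header
 * typically declares one with something like (sketch; XDP_DROP is just
 * an example enum value):
 *
 *   TRACE_DEFINE_ENUM(XDP_DROP);
 *
 * and the linker gathers the generated entries between
 * __start_ftrace_enum_maps and __stop_ftrace_enum_maps so the enum
 * names can be resolved in the trace output.
 */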
7567
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007568#ifdef CONFIG_MODULES
7569static void trace_module_add_enums(struct module *mod)
7570{
7571 if (!mod->num_trace_enums)
7572 return;
7573
7574 /*
 7575 * Modules with bad taint do not have events created, so do
 7576 * not bother with enums either.
7577 */
7578 if (trace_module_has_bad_taint(mod))
7579 return;
7580
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007581 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007582}
7583
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007584#ifdef CONFIG_TRACE_ENUM_MAP_FILE
7585static void trace_module_remove_enums(struct module *mod)
7586{
7587 union trace_enum_map_item *map;
7588 union trace_enum_map_item **last = &trace_enum_maps;
7589
7590 if (!mod->num_trace_enums)
7591 return;
7592
7593 mutex_lock(&trace_enum_mutex);
7594
7595 map = trace_enum_maps;
7596
7597 while (map) {
7598 if (map->head.mod == mod)
7599 break;
7600 map = trace_enum_jmp_to_tail(map);
7601 last = &map->tail.next;
7602 map = map->tail.next;
7603 }
7604 if (!map)
7605 goto out;
7606
7607 *last = trace_enum_jmp_to_tail(map)->tail.next;
7608 kfree(map);
7609 out:
7610 mutex_unlock(&trace_enum_mutex);
7611}
7612#else
7613static inline void trace_module_remove_enums(struct module *mod) { }
7614#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
7615
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007616static int trace_module_notify(struct notifier_block *self,
7617 unsigned long val, void *data)
7618{
7619 struct module *mod = data;
7620
7621 switch (val) {
7622 case MODULE_STATE_COMING:
7623 trace_module_add_enums(mod);
7624 break;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007625 case MODULE_STATE_GOING:
7626 trace_module_remove_enums(mod);
7627 break;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007628 }
7629
7630 return 0;
7631}
7632
7633static struct notifier_block trace_module_nb = {
7634 .notifier_call = trace_module_notify,
7635 .priority = 0,
7636};
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007637#endif /* CONFIG_MODULES */
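/*
 * Note: trace_module_nb is registered in tracer_init_tracefs() below
 * via register_module_notifier(), so a module's enum maps are added at
 * MODULE_STATE_COMING and removed again at MODULE_STATE_GOING.
 */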
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007638
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007639static __init int tracer_init_tracefs(void)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007640{
7641 struct dentry *d_tracer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007642
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08007643 trace_access_lock_init();
7644
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007645 d_tracer = tracing_init_dentry();
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05007646 if (IS_ERR(d_tracer))
Namhyung Kimed6f1c92013-04-10 09:18:12 +09007647 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007648
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007649 init_tracer_tracefs(&global_trace, d_tracer);
Steven Rostedt (Red Hat)501c2372016-07-05 10:04:34 -04007650 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007651
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007652 trace_create_file("tracing_thresh", 0644, d_tracer,
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04007653 &global_trace, &tracing_thresh_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007654
Li Zefan339ae5d2009-04-17 10:34:30 +08007655 trace_create_file("README", 0444, d_tracer,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007656 NULL, &tracing_readme_fops);
Ingo Molnar7bd2f242008-05-12 21:20:45 +02007657
Avadh Patel69abe6a2009-04-10 16:04:48 -04007658 trace_create_file("saved_cmdlines", 0444, d_tracer,
7659 NULL, &tracing_saved_cmdlines_fops);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007660
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09007661 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
7662 NULL, &tracing_saved_cmdlines_size_fops);
7663
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04007664 trace_enum_init();
7665
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007666 trace_create_enum_file(d_tracer);
7667
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007668#ifdef CONFIG_MODULES
7669 register_module_notifier(&trace_module_nb);
7670#endif
7671
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007672#ifdef CONFIG_DYNAMIC_FTRACE
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007673 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
7674 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007675#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007676
Steven Rostedt277ba042012-08-03 16:10:49 -04007677 create_trace_instances(d_tracer);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007678
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007679 update_tracer_options(&global_trace);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05007680
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01007681 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007682}
7683
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007684static int trace_panic_handler(struct notifier_block *this,
7685 unsigned long event, void *unused)
7686{
Steven Rostedt944ac422008-10-23 19:26:08 -04007687 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007688 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007689 return NOTIFY_OK;
7690}
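/*
 * ftrace_dump_on_oops is normally enabled from userspace or the boot
 * command line, e.g. (illustrative):
 *
 *   echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 *
 * or booting with "ftrace_dump_on_oops". A non-zero value makes the
 * panic and die notifiers below dump the ring buffer to the console.
 */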
7691
7692static struct notifier_block trace_panic_notifier = {
7693 .notifier_call = trace_panic_handler,
7694 .next = NULL,
7695 .priority = 150 /* priority: INT_MAX >= x >= 0 */
7696};
7697
7698static int trace_die_handler(struct notifier_block *self,
7699 unsigned long val,
7700 void *data)
7701{
7702 switch (val) {
7703 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04007704 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007705 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007706 break;
7707 default:
7708 break;
7709 }
7710 return NOTIFY_OK;
7711}
7712
7713static struct notifier_block trace_die_notifier = {
7714 .notifier_call = trace_die_handler,
7715 .priority = 200
7716};
7717
7718/*
 7719 * printk is capped at a max of 1024; we really don't need it that big.
7720 * Nothing should be printing 1000 characters anyway.
7721 */
7722#define TRACE_MAX_PRINT 1000
7723
7724/*
7725 * Define here KERN_TRACE so that we have one place to modify
7726 * it if we decide to change what log level the ftrace dump
7727 * should be at.
7728 */
Steven Rostedt428aee12009-01-14 12:24:42 -05007729#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007730
Jason Wessel955b61e2010-08-05 09:22:23 -05007731void
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007732trace_printk_seq(struct trace_seq *s)
7733{
7734 /* Probably should print a warning here. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04007735 if (s->seq.len >= TRACE_MAX_PRINT)
7736 s->seq.len = TRACE_MAX_PRINT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007737
Steven Rostedt (Red Hat)820b75f2014-11-19 10:56:41 -05007738 /*
7739 * More paranoid code. Although the buffer size is set to
7740 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7741 * an extra layer of protection.
7742 */
7743 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
7744 s->seq.len = s->seq.size - 1;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007745
 7746 /* Should be zero terminated, but we are paranoid. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04007747 s->buffer[s->seq.len] = 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007748
7749 printk(KERN_TRACE "%s", s->buffer);
7750
Steven Rostedtf9520752009-03-02 14:04:40 -05007751 trace_seq_init(s);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007752}
7753
Jason Wessel955b61e2010-08-05 09:22:23 -05007754void trace_init_global_iter(struct trace_iterator *iter)
7755{
7756 iter->tr = &global_trace;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007757 iter->trace = iter->tr->current_trace;
Steven Rostedtae3b5092013-01-23 15:22:59 -05007758 iter->cpu_file = RING_BUFFER_ALL_CPUS;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007759 iter->trace_buffer = &global_trace.trace_buffer;
Cody P Schaferb2f974d2013-10-23 11:49:57 -07007760
7761 if (iter->trace && iter->trace->open)
7762 iter->trace->open(iter);
7763
7764 /* Annotate start of buffers if we had overruns */
7765 if (ring_buffer_overruns(iter->trace_buffer->buffer))
7766 iter->iter_flags |= TRACE_FILE_ANNOTATE;
7767
7768 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7769 if (trace_clocks[iter->tr->clock_id].in_ns)
7770 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
Jason Wessel955b61e2010-08-05 09:22:23 -05007771}
7772
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04007773void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007774{
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007775 /* use static because iter can be a bit big for the stack */
7776 static struct trace_iterator iter;
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04007777 static atomic_t dump_running;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007778 struct trace_array *tr = &global_trace;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01007779 unsigned int old_userobj;
Steven Rostedtd7690412008-10-01 00:29:53 -04007780 unsigned long flags;
7781 int cnt = 0, cpu;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007782
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04007783 /* Only allow one dump user at a time. */
7784 if (atomic_inc_return(&dump_running) != 1) {
7785 atomic_dec(&dump_running);
7786 return;
Steven Rostedte0a413f2011-09-29 21:26:16 -04007787 }
7788
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04007789 /*
7790 * Always turn off tracing when we dump.
7791 * We don't need to show trace output of what happens
7792 * between multiple crashes.
7793 *
7794 * If the user does a sysrq-z, then they can re-enable
7795 * tracing with echo 1 > tracing_on.
7796 */
7797 tracing_off();
7798
7799 local_irq_save(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007800
Jovi Zhang38dbe0b2013-01-25 18:03:07 +08007801 /* Simulate the iterator */
Jason Wessel955b61e2010-08-05 09:22:23 -05007802 trace_init_global_iter(&iter);
7803
Steven Rostedtd7690412008-10-01 00:29:53 -04007804 for_each_tracing_cpu(cpu) {
Umesh Tiwari5e2d5ef2015-06-22 16:55:06 +05307805 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -04007806 }
7807
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007808 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01007809
Török Edwinb54d3de2008-11-22 13:28:48 +02007810 /* don't look at user memory in panic mode */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007811 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
Török Edwinb54d3de2008-11-22 13:28:48 +02007812
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007813 switch (oops_dump_mode) {
7814 case DUMP_ALL:
Steven Rostedtae3b5092013-01-23 15:22:59 -05007815 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007816 break;
7817 case DUMP_ORIG:
7818 iter.cpu_file = raw_smp_processor_id();
7819 break;
7820 case DUMP_NONE:
7821 goto out_enable;
7822 default:
7823 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
Steven Rostedtae3b5092013-01-23 15:22:59 -05007824 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007825 }
7826
7827 printk(KERN_TRACE "Dumping ftrace buffer:\n");
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007828
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04007829 /* Did function tracer already get disabled? */
7830 if (ftrace_is_dead()) {
7831 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7832 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7833 }
7834
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007835 /*
 7836 * We need to stop all tracing on all CPUs to read
 7837 * the next buffer. This is a bit expensive, but is
 7838 * not done often. We read everything we can,
7839 * and then release the locks again.
7840 */
7841
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007842 while (!trace_empty(&iter)) {
7843
7844 if (!cnt)
7845 printk(KERN_TRACE "---------------------------------\n");
7846
7847 cnt++;
7848
7849 /* reset all but tr, trace, and overruns */
7850 memset(&iter.seq, 0,
7851 sizeof(struct trace_iterator) -
7852 offsetof(struct trace_iterator, seq));
7853 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7854 iter.pos = -1;
7855
Jason Wessel955b61e2010-08-05 09:22:23 -05007856 if (trace_find_next_entry_inc(&iter) != NULL) {
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08007857 int ret;
7858
7859 ret = print_trace_line(&iter);
7860 if (ret != TRACE_TYPE_NO_CONSUME)
7861 trace_consume(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007862 }
Steven Rostedtb892e5c2012-03-01 22:06:48 -05007863 touch_nmi_watchdog();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007864
7865 trace_printk_seq(&iter.seq);
7866 }
7867
7868 if (!cnt)
7869 printk(KERN_TRACE " (ftrace buffer empty)\n");
7870 else
7871 printk(KERN_TRACE "---------------------------------\n");
7872
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007873 out_enable:
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007874 tr->trace_flags |= old_userobj;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01007875
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04007876 for_each_tracing_cpu(cpu) {
7877 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01007878 }
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04007879 atomic_dec(&dump_running);
Steven Rostedtcd891ae2009-04-28 11:39:34 -04007880 local_irq_restore(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007881}
Paul E. McKenneya8eecf22011-10-02 11:01:15 -07007882EXPORT_SYMBOL_GPL(ftrace_dump);
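/*
 * A minimal sketch of how an external debug hook might use this export
 * (hypothetical caller, not from this file):
 *
 *   static void my_watchdog_fired(void)
 *   {
 *           ftrace_dump(DUMP_ORIG);    // dump only the tripping CPU
 *   }
 *
 * The magic SysRq 'z' key reaches the same function with DUMP_ALL.
 */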
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01007883
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007884__init static int tracer_alloc_buffers(void)
7885{
Steven Rostedt73c51622009-03-11 13:42:01 -04007886 int ring_buf_size;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10307887 int ret = -ENOMEM;
7888
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04007889 /*
 7890 * Make sure we don't accidentally add more trace options
7891 * than we have bits for.
7892 */
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007893 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04007894
Rusty Russell9e01c1b2009-01-01 10:12:22 +10307895 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7896 goto out;
7897
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007898 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
Rusty Russell9e01c1b2009-01-01 10:12:22 +10307899 goto out_free_buffer_mask;
7900
Steven Rostedt07d777f2011-09-22 14:01:55 -04007901 /* Only allocate trace_printk buffers if a trace_printk exists */
7902 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
Steven Rostedt81698832012-10-11 10:15:05 -04007903 /* Must be called before global_trace.buffer is allocated */
Steven Rostedt07d777f2011-09-22 14:01:55 -04007904 trace_printk_init_buffers();
7905
Steven Rostedt73c51622009-03-11 13:42:01 -04007906 /* To save memory, keep the ring buffer size to its minimum */
7907 if (ring_buffer_expanded)
7908 ring_buf_size = trace_buf_size;
7909 else
7910 ring_buf_size = 1;
7911
Rusty Russell9e01c1b2009-01-01 10:12:22 +10307912 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007913 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007914
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007915 raw_spin_lock_init(&global_trace.start_lock);
7916
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01007917 /*
 7918 * The prepare callback allocates some memory for the ring buffer. We
 7919 * don't free the buffer if the CPU goes down. If we were to free
7920 * the buffer, then the user would lose any trace that was in the
7921 * buffer. The memory will be removed once the "instance" is removed.
7922 */
7923 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
 7924 "trace/RB:prepare", trace_rb_cpu_prepare,
7925 NULL);
7926 if (ret < 0)
7927 goto out_free_cpumask;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04007928 /* Used for event triggers */
7929 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7930 if (!temp_buffer)
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01007931 goto out_rm_hp_state;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04007932
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09007933 if (trace_create_savedcmd() < 0)
7934 goto out_free_temp_buffer;
7935
Steven Rostedtab464282008-05-12 21:21:00 +02007936 /* TODO: make the number of buffers hot pluggable with CPUS */
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007937 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04007938 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7939 WARN_ON(1);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09007940 goto out_free_savedcmd;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04007941 }
Steven Rostedta7603ff2012-08-06 16:24:11 -04007942
Steven Rostedt499e5472012-02-22 15:50:28 -05007943 if (global_trace.buffer_disabled)
7944 tracing_off();
Steven Rostedt3928a8a2008-09-29 23:02:41 -04007945
Steven Rostedte1e232c2014-02-10 23:38:46 -05007946 if (trace_boot_clock) {
7947 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7948 if (ret < 0)
Joe Perchesa395d6a2016-03-22 14:28:09 -07007949 pr_warn("Trace clock %s not defined, going back to default\n",
7950 trace_boot_clock);
Steven Rostedte1e232c2014-02-10 23:38:46 -05007951 }
7952
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04007953 /*
7954 * register_tracer() might reference current_trace, so it
7955 * needs to be set before we register anything. This is
7956 * just a bootstrap of current_trace anyway.
7957 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007958 global_trace.current_trace = &nop_trace;
7959
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05007960 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7961
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05007962 ftrace_init_global_array_ops(&global_trace);
7963
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007964 init_trace_flags_index(&global_trace);
7965
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04007966 register_tracer(&nop_trace);
7967
Steven Rostedt60a11772008-05-12 21:20:44 +02007968 /* All seems OK, enable tracing */
7969 tracing_disabled = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04007970
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007971 atomic_notifier_chain_register(&panic_notifier_list,
7972 &trace_panic_notifier);
7973
7974 register_die_notifier(&trace_die_notifier);
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01007975
Steven Rostedtae63b31e2012-05-03 23:09:03 -04007976 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7977
7978 INIT_LIST_HEAD(&global_trace.systems);
7979 INIT_LIST_HEAD(&global_trace.events);
7980 list_add(&global_trace.list, &ftrace_trace_arrays);
7981
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08007982 apply_trace_boot_options();
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04007983
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007984 register_snapshot_cmd();
7985
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01007986 return 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007987
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09007988out_free_savedcmd:
7989 free_saved_cmdlines_buffer(savedcmd);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04007990out_free_temp_buffer:
7991 ring_buffer_free(temp_buffer);
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01007992out_rm_hp_state:
7993 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10307994out_free_cpumask:
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007995 free_cpumask_var(global_trace.tracing_cpumask);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10307996out_free_buffer_mask:
7997 free_cpumask_var(tracing_buffer_mask);
7998out:
7999 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008000}
Steven Rostedtb2821ae2009-02-02 21:38:32 -05008001
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05008002void __init trace_init(void)
8003{
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05008004 if (tracepoint_printk) {
8005 tracepoint_print_iter =
8006 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
8007 if (WARN_ON(!tracepoint_print_iter))
8008 tracepoint_printk = 0;
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05008009 else
8010 static_key_enable(&tracepoint_printk_key.key);
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05008011 }
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05008012 tracer_alloc_buffers();
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04008013 trace_event_init();
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05008014}
8015
Steven Rostedtb2821ae2009-02-02 21:38:32 -05008016__init static int clear_boot_tracer(void)
8017{
8018 /*
 8019 * The default bootup tracer name points into an init section
 8020 * that is freed after boot. This function is called at lateinit;
 8021 * if the boot tracer was never registered, clear the pointer so
 8022 * that a later registration cannot access the memory that is
 8023 * about to be freed.
8024 */
8025 if (!default_bootup_tracer)
8026 return 0;
8027
8028 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
8029 default_bootup_tracer);
8030 default_bootup_tracer = NULL;
8031
8032 return 0;
8033}
8034
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008035fs_initcall(tracer_init_tracefs);
Steven Rostedtb2821ae2009-02-02 21:38:32 -05008036late_initcall(clear_boot_tracer);