/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 to dump the buffers of all CPUs.
 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_enum_map_item;

struct trace_enum_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "enum_string"
	 */
	union trace_enum_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
union trace_enum_map_item {
	struct trace_enum_map		map;
	struct trace_enum_map_head	head;
	struct trace_enum_map_tail	tail;
};

static union trace_enum_map_item *trace_enum_maps;
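/*
 * Editor's sketch of the layout the comment above describes (for
 * illustration only; the element count N is hypothetical):
 *
 *   trace_enum_maps -> [ head | map 0 | map 1 | ... | map N-1 | tail ]
 *                         |                                      |
 *                         +- head.length = N, head.mod           +- tail.next
 *                                                                   -> next saved array (or NULL)
 */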
157#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
158
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -0500159static int tracing_set_tracer(struct trace_array *tr, const char *buf);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500160
Li Zefanee6c2c12009-09-18 14:06:47 +0800161#define MAX_TRACER_SIZE 100
162static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500163static char *default_bootup_tracer;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100164
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500165static bool allocate_snapshot;
166
Frederic Weisbecker1beee962009-10-14 20:50:32 +0200167static int __init set_cmdline_ftrace(char *str)
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100168{
Chen Gang67012ab2013-04-08 12:06:44 +0800169 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500170 default_bootup_tracer = bootup_tracer_buf;
Steven Rostedt73c51622009-03-11 13:42:01 -0400171 /* We are using ftrace early, expand it */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500172 ring_buffer_expanded = true;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100173 return 1;
174}
Frederic Weisbecker1beee962009-10-14 20:50:32 +0200175__setup("ftrace=", set_cmdline_ftrace);
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100176
Steven Rostedt944ac422008-10-23 19:26:08 -0400177static int __init set_ftrace_dump_on_oops(char *str)
178{
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200179 if (*str++ != '=' || !*str) {
180 ftrace_dump_on_oops = DUMP_ALL;
181 return 1;
182 }
183
184 if (!strcmp("orig_cpu", str)) {
185 ftrace_dump_on_oops = DUMP_ORIG;
186 return 1;
187 }
188
189 return 0;
Steven Rostedt944ac422008-10-23 19:26:08 -0400190}
191__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
Steven Rostedt60a11772008-05-12 21:20:44 +0200192
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400193static int __init stop_trace_on_warning(char *str)
194{
Luis Claudio R. Goncalves933ff9f2014-11-12 21:14:00 -0200195 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
196 __disable_trace_on_warning = 1;
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400197 return 1;
198}
Luis Claudio R. Goncalves933ff9f2014-11-12 21:14:00 -0200199__setup("traceoff_on_warning", stop_trace_on_warning);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400200
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400201static int __init boot_alloc_snapshot(char *str)
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500202{
203 allocate_snapshot = true;
204 /* We also need the main ring buffer expanded */
205 ring_buffer_expanded = true;
206 return 1;
207}
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400208__setup("alloc_snapshot", boot_alloc_snapshot);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500209
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400210
211static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400212
213static int __init set_trace_boot_options(char *str)
214{
Chen Gang67012ab2013-04-08 12:06:44 +0800215 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400216 return 0;
217}
218__setup("trace_options=", set_trace_boot_options);
219
Steven Rostedte1e232c2014-02-10 23:38:46 -0500220static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
221static char *trace_boot_clock __initdata;
222
223static int __init set_trace_boot_clock(char *str)
224{
225 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
226 trace_boot_clock = trace_boot_clock_buf;
227 return 0;
228}
229__setup("trace_clock=", set_trace_boot_clock);
230
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -0500231static int __init set_tracepoint_printk(char *str)
232{
233 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
234 tracepoint_printk = 1;
235 return 1;
236}
237__setup("tp_printk", set_tracepoint_printk);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400238
Thomas Gleixnera5a1d1c2016-12-21 20:32:01 +0100239unsigned long long ns2usecs(u64 nsec)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200240{
241 nsec += 500;
242 do_div(nsec, 1000);
243 return nsec;
244}
245
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400246/* trace_flags holds trace_options default values */
247#define TRACE_DEFAULT_FLAGS \
248 (FUNCTION_DEFAULT_FLAGS | \
249 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
250 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
251 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
252 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
253
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -0400254/* trace_options that are only supported by global_trace */
255#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
256 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
257
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -0400258/* trace_flags that are default zero for instances */
259#define ZEROED_TRACE_FLAGS \
Namhyung Kim1e104862017-04-17 11:44:28 +0900260 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -0400261
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200262/*
Joel Fernandes67d04bb2017-02-16 20:10:58 -0800263 * The global_trace is the descriptor that holds the top-level tracing
264 * buffers for the live tracing.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200265 */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400266static struct trace_array global_trace = {
267 .trace_flags = TRACE_DEFAULT_FLAGS,
268};
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200269
Steven Rostedtae63b31e2012-05-03 23:09:03 -0400270LIST_HEAD(ftrace_trace_arrays);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200271
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -0400272int trace_array_get(struct trace_array *this_tr)
273{
274 struct trace_array *tr;
275 int ret = -ENODEV;
276
277 mutex_lock(&trace_types_lock);
278 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
279 if (tr == this_tr) {
280 tr->ref++;
281 ret = 0;
282 break;
283 }
284 }
285 mutex_unlock(&trace_types_lock);
286
287 return ret;
288}
289
290static void __trace_array_put(struct trace_array *this_tr)
291{
292 WARN_ON(!this_tr->ref);
293 this_tr->ref--;
294}
295
296void trace_array_put(struct trace_array *this_tr)
297{
298 mutex_lock(&trace_types_lock);
299 __trace_array_put(this_tr);
300 mutex_unlock(&trace_types_lock);
301}
302
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -0400303int call_filter_check_discard(struct trace_event_call *call, void *rec,
Tom Zanussif306cc82013-10-24 08:34:17 -0500304 struct ring_buffer *buffer,
305 struct ring_buffer_event *event)
306{
307 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
308 !filter_match_preds(call->filter, rec)) {
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -0400309 __trace_event_discard_commit(buffer, event);
Tom Zanussif306cc82013-10-24 08:34:17 -0500310 return 1;
311 }
312
313 return 0;
314}
Tom Zanussieb02ce02009-04-08 03:15:54 -0500315
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400316void trace_free_pid_list(struct trace_pid_list *pid_list)
317{
318 vfree(pid_list->pids);
319 kfree(pid_list);
320}
321
Steven Rostedtd8275c42016-04-14 12:15:22 -0400322/**
323 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
324 * @filtered_pids: The list of pids to check
325 * @search_pid: The PID to find in @filtered_pids
326 *
327 * Returns true if @search_pid is fonud in @filtered_pids, and false otherwis.
328 */
329bool
330trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
331{
332 /*
333 * If pid_max changed after filtered_pids was created, we
334 * by default ignore all pids greater than the previous pid_max.
335 */
336 if (search_pid >= filtered_pids->pid_max)
337 return false;
338
339 return test_bit(search_pid, filtered_pids->pids);
340}
341
342/**
343 * trace_ignore_this_task - should a task be ignored for tracing
344 * @filtered_pids: The list of pids to check
345 * @task: The task that should be ignored if not filtered
346 *
347 * Checks if @task should be traced or not from @filtered_pids.
348 * Returns true if @task should *NOT* be traced.
349 * Returns false if @task should be traced.
350 */
351bool
352trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
353{
354 /*
355 * Return false, because if filtered_pids does not exist,
356 * all pids are good to trace.
357 */
358 if (!filtered_pids)
359 return false;
360
361 return !trace_find_filtered_pid(filtered_pids, task->pid);
362}
363
364/**
365 * trace_pid_filter_add_remove - Add or remove a task from a pid_list
366 * @pid_list: The list to modify
367 * @self: The current task for fork or NULL for exit
368 * @task: The task to add or remove
369 *
370 * If adding a task, if @self is defined, the task is only added if @self
371 * is also included in @pid_list. This happens on fork and tasks should
372 * only be added when the parent is listed. If @self is NULL, then the
373 * @task pid will be removed from the list, which would happen on exit
374 * of a task.
375 */
376void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
377 struct task_struct *self,
378 struct task_struct *task)
379{
380 if (!pid_list)
381 return;
382
383 /* For forks, we only add if the forking task is listed */
384 if (self) {
385 if (!trace_find_filtered_pid(pid_list, self->pid))
386 return;
387 }
388
389 /* Sorry, but we don't support pid_max changing after setting */
390 if (task->pid >= pid_list->pid_max)
391 return;
392
393 /* "self" is set for forks, and NULL for exits */
394 if (self)
395 set_bit(task->pid, pid_list->pids);
396 else
397 clear_bit(task->pid, pid_list->pids);
398}
399
Steven Rostedt (Red Hat)5cc89762016-04-20 15:19:54 -0400400/**
401 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
402 * @pid_list: The pid list to show
403 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
404 * @pos: The position of the file
405 *
406 * This is used by the seq_file "next" operation to iterate the pids
407 * listed in a trace_pid_list structure.
408 *
409 * Returns the pid+1 as we want to display pid of zero, but NULL would
410 * stop the iteration.
411 */
412void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
413{
414 unsigned long pid = (unsigned long)v;
415
416 (*pos)++;
417
418 /* pid already is +1 of the actual prevous bit */
419 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
420
421 /* Return pid + 1 to allow zero to be represented */
422 if (pid < pid_list->pid_max)
423 return (void *)(pid + 1);
424
425 return NULL;
426}
427
428/**
429 * trace_pid_start - Used for seq_file to start reading pid lists
430 * @pid_list: The pid list to show
431 * @pos: The position of the file
432 *
433 * This is used by seq_file "start" operation to start the iteration
434 * of listing pids.
435 *
436 * Returns the pid+1 as we want to display pid of zero, but NULL would
437 * stop the iteration.
438 */
439void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
440{
441 unsigned long pid;
442 loff_t l = 0;
443
444 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
445 if (pid >= pid_list->pid_max)
446 return NULL;
447
448 /* Return pid + 1 so that zero can be the exit value */
449 for (pid++; pid && l < *pos;
450 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
451 ;
452 return (void *)pid;
453}
454
455/**
456 * trace_pid_show - show the current pid in seq_file processing
457 * @m: The seq_file structure to write into
458 * @v: A void pointer of the pid (+1) value to display
459 *
460 * Can be directly used by seq_file operations to display the current
461 * pid value.
462 */
463int trace_pid_show(struct seq_file *m, void *v)
464{
465 unsigned long pid = (unsigned long)v - 1;
466
467 seq_printf(m, "%lu\n", pid);
468 return 0;
469}
470
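/*
 * Editor's sketch (not built): the trace_pid_start/next/show helpers
 * above are shaped to plug directly into a seq_file. A minimal,
 * hypothetical wiring could look like this; "example_pid_list" and the
 * empty stop callback are assumptions for illustration only.
 */
#if 0
static struct trace_pid_list *example_pid_list;

static void *example_start(struct seq_file *m, loff_t *pos)
{
	/* returns pid+1 of the first listed pid, or NULL when empty */
	return trace_pid_start(example_pid_list, pos);
}

static void *example_next(struct seq_file *m, void *v, loff_t *pos)
{
	return trace_pid_next(example_pid_list, v, pos);
}

static void example_stop(struct seq_file *m, void *v)
{
	/* nothing to release in this sketch */
}

static const struct seq_operations example_pid_sops = {
	.start	= example_start,
	.next	= example_next,
	.stop	= example_stop,
	.show	= trace_pid_show,	/* matches seq_operations::show exactly */
};
#endif
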
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list)
		return -ENOMEM;

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		parser.buffer[parser.idx] = 0;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}

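/*
 * Editor's sketch (not built): trace_pid_write() is meant to back a
 * file's write handler, as above. A hypothetical caller swaps in the
 * freshly built list and frees the old one; "example_pid_list" is the
 * same illustrative list used in the earlier sketch.
 */
#if 0
static ssize_t example_pid_fwrite(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	struct trace_pid_list *old = example_pid_list;
	struct trace_pid_list *new_list = NULL;
	ssize_t ret;

	ret = trace_pid_write(old, &new_list, ubuf, cnt);
	if (ret < 0)
		return ret;

	/* publish the new list (NULL means the list was cleared) */
	example_pid_list = new_list;
	if (old)
		trace_free_pid_list(old);

	if (ret > 0)
		*ppos += ret;
	return ret;
}
#endif
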
static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low level
 * protection. The validity of the events (which are returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multi process access to different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
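/*
 * Editor's sketch (not built): the intended pairing of the primitives
 * above when consuming one CPU's buffer, per the "serialize the access
 * of the ring buffer" comment. Passing RING_BUFFER_ALL_CPUS instead
 * would take the lock for the whole ring buffer.
 */
#if 0
static void example_consume_cpu(int cpu)
{
	trace_access_lock(cpu);		/* excludes ALL_CPUS holders too */
	/* ... read or consume events from @cpu's ring buffer ... */
	trace_access_unlock(cpu);
}
#endif
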
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, flags, pc);
	ent->type = type;
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct ring_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
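/*
 * Editor's note: callers normally reach __trace_puts() through the
 * trace_puts() macro, which supplies the caller address and the string
 * size. A direct, illustrative call (sketch, not built):
 */
#if 0
static void example_puts(void)
{
	/* 18 == strlen("hello ring buffer\n") */
	__trace_puts(_THIS_IP_, "hello ring buffer\n", 18);
}
#endif
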
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string whose address is written into the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance(struct trace_array *tr)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
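/*
 * Editor's sketch (not built): the usage pattern the kernel-doc above
 * describes - allocate the snapshot buffer once from sleepable context,
 * then snapshot at the trigger point. "example_hit_condition" is a
 * stand-in for whatever condition is being chased.
 */
#if 0
static void example_snapshot_on_condition(bool example_hit_condition)
{
	/* earlier, in sleepable context: tracing_alloc_snapshot(); */
	if (example_hit_condition)
		tracing_snapshot();	/* swap live buffer with the spare */
}
#endif
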
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
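/*
 * Editor's sketch (not built): tracing_off() is typically called from
 * the point where a bug is detected, freezing the ring buffer so the
 * events leading up to the problem survive for a later dump.
 */
#if 0
static void example_freeze_trace_on_bug(bool example_bug_detected)
{
	if (example_bug_detected && tracing_is_on())
		tracing_off();	/* stop recording; buffers keep their data */
}
#endif
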
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

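/*
 * Editor's sketch (not built): the canonical consumption loop for
 * trace_get_user(), mirroring what trace_pid_write() does above - one
 * space-separated token per iteration until the user buffer is drained.
 * The buffer size of 128 is an arbitrary choice for illustration.
 */
#if 0
static ssize_t example_parse_tokens(const char __user *ubuf, size_t cnt)
{
	struct trace_parser parser;
	loff_t pos;
	ssize_t ret = 0;

	if (trace_parser_get_init(&parser, 128))
		return -ENOMEM;

	while (cnt > 0) {
		pos = 0;
		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;
		ubuf += ret;
		cnt -= ret;

		/* parser.buffer now holds one token; terminate and use it */
		parser.buffer[parser.idx] = 0;

		trace_parser_clear(&parser);
	}
	trace_parser_put(&parser);
	return ret;
}
#endif
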
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
	struct list_head		list;
	struct tracer			*type;
};

static LIST_HEAD(postponed_selftests);

static int save_selftest(struct tracer *type)
{
	struct trace_selftests *selftest;

	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
	if (!selftest)
		return -ENOMEM;

	selftest->type = type;
	list_add(&selftest->list, &postponed_selftests);
	return 0;
}

static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * If a tracer registers early in boot up (before scheduling is
1466 * initialized and such), then do not run its selftests yet.
1467 * Instead, run it a little later in the boot process.
1468 */
1469 if (!selftests_can_run)
1470 return save_selftest(type);
1471
1472 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001473 * Run a selftest on this tracer.
1474 * Here we reset the trace buffer, and set the current
1475 * tracer to be this tracer. The tracer can then run some
1476 * internal tracing to verify that everything is in order.
1477 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001478 */
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001479 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001480
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001481 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001482
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001483#ifdef CONFIG_TRACER_MAX_TRACE
1484 if (type->use_max_tr) {
1485 /* If we expanded the buffers, make sure the max is expanded too */
1486 if (ring_buffer_expanded)
1487 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1488 RING_BUFFER_ALL_CPUS);
1489 tr->allocated_snapshot = true;
1490 }
1491#endif
1492
1493 /* the test is responsible for initializing and enabling */
1494 pr_info("Testing tracer %s: ", type->name);
1495 ret = type->selftest(type, tr);
1496 /* the test is responsible for resetting too */
1497 tr->current_trace = saved_tracer;
1498 if (ret) {
1499 printk(KERN_CONT "FAILED!\n");
1500 /* Add the warning after printing 'FAILED' */
1501 WARN_ON(1);
1502 return -1;
1503 }
1504 /* Only reset on passing, to avoid touching corrupted buffers */
1505 tracing_reset_online_cpus(&tr->trace_buffer);
1506
1507#ifdef CONFIG_TRACER_MAX_TRACE
1508 if (type->use_max_tr) {
1509 tr->allocated_snapshot = false;
1510
1511 /* Shrink the max buffer again */
1512 if (ring_buffer_expanded)
1513 ring_buffer_resize(tr->max_buffer.buffer, 1,
1514 RING_BUFFER_ALL_CPUS);
1515 }
1516#endif
1517
1518 printk(KERN_CONT "PASSED\n");
1519 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001520}
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001521
1522static __init int init_trace_selftests(void)
1523{
1524 struct trace_selftests *p, *n;
1525 struct tracer *t, **last;
1526 int ret;
1527
1528 selftests_can_run = true;
1529
1530 mutex_lock(&trace_types_lock);
1531
1532 if (list_empty(&postponed_selftests))
1533 goto out;
1534
1535 pr_info("Running postponed tracer tests:\n");
1536
1537 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1538 ret = run_tracer_selftest(p->type);
1539 /* If the test fails, then warn and remove from available_tracers */
1540 if (ret < 0) {
1541 WARN(1, "tracer: %s failed selftest, disabling\n",
1542 p->type->name);
1543 last = &trace_types;
1544 for (t = trace_types; t; t = t->next) {
1545 if (t == p->type) {
1546 *last = t->next;
1547 break;
1548 }
1549 last = &t->next;
1550 }
1551 }
1552 list_del(&p->list);
1553 kfree(p);
1554 }
1555
1556 out:
1557 mutex_unlock(&trace_types_lock);
1558
1559 return 0;
1560}
1561early_initcall(init_trace_selftests);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001562#else
1563static inline int run_tracer_selftest(struct tracer *type)
1564{
1565 return 0;
1566}
1567#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001568
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001569static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1570
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001571static void __init apply_trace_boot_options(void);
1572
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001573/**
1574 * register_tracer - register a tracer with the ftrace system.
 1575 * @type: the plugin for the tracer
1576 *
1577 * Register a new plugin tracer.
1578 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001579int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001580{
1581 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001582 int ret = 0;
1583
1584 if (!type->name) {
1585 pr_info("Tracer must have a name\n");
1586 return -1;
1587 }
1588
Dan Carpenter24a461d2010-07-10 12:06:44 +02001589 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08001590 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1591 return -1;
1592 }
1593
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001594 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01001595
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001596 tracing_selftest_running = true;
1597
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001598 for (t = trace_types; t; t = t->next) {
1599 if (strcmp(type->name, t->name) == 0) {
1600 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08001601 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001602 type->name);
1603 ret = -1;
1604 goto out;
1605 }
1606 }
1607
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001608 if (!type->set_flag)
1609 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08001610 if (!type->flags) {
 1611 /* allocate a dummy tracer_flags */
1612 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08001613 if (!type->flags) {
1614 ret = -ENOMEM;
1615 goto out;
1616 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08001617 type->flags->val = 0;
1618 type->flags->opts = dummy_tracer_opt;
1619 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001620 if (!type->flags->opts)
1621 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01001622
Chunyu Hud39cdd22016-03-08 21:37:01 +08001623 /* store the tracer for __set_tracer_option */
1624 type->flags->trace = type;
1625
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001626 ret = run_tracer_selftest(type);
1627 if (ret < 0)
1628 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02001629
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001630 type->next = trace_types;
1631 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001632 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02001633
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001634 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001635 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001636 mutex_unlock(&trace_types_lock);
1637
Steven Rostedtdac74942009-02-05 01:13:38 -05001638 if (ret || !default_bootup_tracer)
1639 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05001640
Li Zefanee6c2c12009-09-18 14:06:47 +08001641 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05001642 goto out_unlock;
1643
1644 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1645 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05001646 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05001647 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001648
1649 apply_trace_boot_options();
1650
Steven Rostedtdac74942009-02-05 01:13:38 -05001651 /* disable other selftests, since this tracer will break them */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05001652 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05001653#ifdef CONFIG_FTRACE_STARTUP_TEST
1654 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1655 type->name);
1656#endif
1657
1658 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001659 return ret;
1660}
1661
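/*
 * Illustrative sketch (not part of this file): the minimal shape of a
 * tracer plugin as register_tracer() expects it.  The "example" name and
 * both callbacks are hypothetical.
 *
 *	static int example_init(struct trace_array *tr) { return 0; }
 *	static void example_reset(struct trace_array *tr) { }
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_init,
 *		.reset	= example_reset,
 *	};
 *
 *	static __init int register_example(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 */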
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001662void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001663{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001664 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001665
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001666 if (!buffer)
1667 return;
1668
Steven Rostedtf6339032009-09-04 12:35:16 -04001669 ring_buffer_record_disable(buffer);
1670
1671 /* Make sure all commits have finished */
1672 synchronize_sched();
Steven Rostedt68179682012-05-08 20:57:53 -04001673 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001674
1675 ring_buffer_record_enable(buffer);
1676}
1677
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001678void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001679{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001680 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001681 int cpu;
1682
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001683 if (!buffer)
1684 return;
1685
Steven Rostedt621968c2009-09-04 12:02:35 -04001686 ring_buffer_record_disable(buffer);
1687
1688 /* Make sure all commits have finished */
1689 synchronize_sched();
1690
Alexander Z Lam94571582013-08-02 18:36:16 -07001691 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001692
1693 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001694 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001695
1696 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001697}
1698
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001699/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001700void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001701{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001702 struct trace_array *tr;
1703
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001704 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001705 tracing_reset_online_cpus(&tr->trace_buffer);
1706#ifdef CONFIG_TRACER_MAX_TRACE
1707 tracing_reset_online_cpus(&tr->max_buffer);
1708#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001709 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001710}
1711
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001712#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001713#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001714static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001715struct saved_cmdlines_buffer {
1716 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1717 unsigned *map_cmdline_to_pid;
1718 unsigned cmdline_num;
1719 int cmdline_idx;
1720 char *saved_cmdlines;
1721};
1722static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001723
Steven Rostedt25b0b442008-05-12 21:21:00 +02001724/* temporary disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001725static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001726
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001727static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001728{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001729 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1730}
1731
1732static inline void set_cmdline(int idx, const char *cmdline)
1733{
1734 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1735}
1736
1737static int allocate_cmdlines_buffer(unsigned int val,
1738 struct saved_cmdlines_buffer *s)
1739{
1740 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1741 GFP_KERNEL);
1742 if (!s->map_cmdline_to_pid)
1743 return -ENOMEM;
1744
1745 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1746 if (!s->saved_cmdlines) {
1747 kfree(s->map_cmdline_to_pid);
1748 return -ENOMEM;
1749 }
1750
1751 s->cmdline_idx = 0;
1752 s->cmdline_num = val;
1753 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1754 sizeof(s->map_pid_to_cmdline));
1755 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1756 val * sizeof(*s->map_cmdline_to_pid));
1757
1758 return 0;
1759}
1760
1761static int trace_create_savedcmd(void)
1762{
1763 int ret;
1764
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001765 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001766 if (!savedcmd)
1767 return -ENOMEM;
1768
1769 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1770 if (ret < 0) {
1771 kfree(savedcmd);
1772 savedcmd = NULL;
1773 return -ENOMEM;
1774 }
1775
1776 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001777}
1778
Carsten Emdeb5130b12009-09-13 01:43:07 +02001779int is_tracing_stopped(void)
1780{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001781 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001782}
1783
Steven Rostedt0f048702008-11-05 16:05:44 -05001784/**
1785 * tracing_start - quick start of the tracer
1786 *
1787 * If tracing is enabled but was stopped by tracing_stop,
1788 * this will start the tracer back up.
1789 */
1790void tracing_start(void)
1791{
1792 struct ring_buffer *buffer;
1793 unsigned long flags;
1794
1795 if (tracing_disabled)
1796 return;
1797
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001798 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1799 if (--global_trace.stop_count) {
1800 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001801 /* Someone screwed up their debugging */
1802 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001803 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001804 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001805 goto out;
1806 }
1807
Steven Rostedta2f80712010-03-12 19:56:00 -05001808 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001809 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001810
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001811 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001812 if (buffer)
1813 ring_buffer_record_enable(buffer);
1814
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001815#ifdef CONFIG_TRACER_MAX_TRACE
1816 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001817 if (buffer)
1818 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001819#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001820
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001821 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001822
Steven Rostedt0f048702008-11-05 16:05:44 -05001823 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001824 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1825}
1826
1827static void tracing_start_tr(struct trace_array *tr)
1828{
1829 struct ring_buffer *buffer;
1830 unsigned long flags;
1831
1832 if (tracing_disabled)
1833 return;
1834
1835 /* If global, we need to also start the max tracer */
1836 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1837 return tracing_start();
1838
1839 raw_spin_lock_irqsave(&tr->start_lock, flags);
1840
1841 if (--tr->stop_count) {
1842 if (tr->stop_count < 0) {
1843 /* Someone screwed up their debugging */
1844 WARN_ON_ONCE(1);
1845 tr->stop_count = 0;
1846 }
1847 goto out;
1848 }
1849
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001850 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001851 if (buffer)
1852 ring_buffer_record_enable(buffer);
1853
1854 out:
1855 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001856}
1857
1858/**
1859 * tracing_stop - quick stop of the tracer
1860 *
1861 * Light weight way to stop tracing. Use in conjunction with
1862 * tracing_start.
1863 */
1864void tracing_stop(void)
1865{
1866 struct ring_buffer *buffer;
1867 unsigned long flags;
1868
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001869 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1870 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001871 goto out;
1872
Steven Rostedta2f80712010-03-12 19:56:00 -05001873 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001874 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001875
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001876 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001877 if (buffer)
1878 ring_buffer_record_disable(buffer);
1879
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001880#ifdef CONFIG_TRACER_MAX_TRACE
1881 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001882 if (buffer)
1883 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001884#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001885
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001886 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001887
Steven Rostedt0f048702008-11-05 16:05:44 -05001888 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001889 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1890}
1891
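/*
 * Illustrative sketch (not part of this file): tracing_stop() and
 * tracing_start() nest via stop_count, so a debug helper can freeze the
 * global buffers around an inspection point and safely restart them.
 * The helper name below is hypothetical.
 *
 *	tracing_stop();
 *	inspect_frozen_buffers();
 *	tracing_start();
 */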
1892static void tracing_stop_tr(struct trace_array *tr)
1893{
1894 struct ring_buffer *buffer;
1895 unsigned long flags;
1896
1897 /* If global, we need to also stop the max tracer */
1898 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1899 return tracing_stop();
1900
1901 raw_spin_lock_irqsave(&tr->start_lock, flags);
1902 if (tr->stop_count++)
1903 goto out;
1904
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001905 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001906 if (buffer)
1907 ring_buffer_record_disable(buffer);
1908
1909 out:
1910 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001911}
1912
Ingo Molnare309b412008-05-12 21:20:51 +02001913void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001914
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001915static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001916{
Carsten Emdea635cf02009-03-18 09:00:41 +01001917 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001918
1919 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001920 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001921
1922 /*
1923 * It's not the end of the world if we don't get
1924 * the lock, but we also don't want to spin
1925 * nor do we want to disable interrupts,
1926 * so if we miss here, then better luck next time.
1927 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001928 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001929 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001930
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001931 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001932 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001933 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001934
Carsten Emdea635cf02009-03-18 09:00:41 +01001935 /*
1936 * Check whether the cmdline buffer at idx has a pid
1937 * mapped. We are going to overwrite that entry so we
1938 * need to clear the map_pid_to_cmdline. Otherwise we
1939 * would read the new comm for the old pid.
1940 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001941 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001942 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001943 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001944
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001945 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1946 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001947
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001948 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001949 }
1950
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001951 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001952
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001953 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001954
1955 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001956}
1957
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001958static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001959{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001960 unsigned map;
1961
Steven Rostedt4ca530852009-03-16 19:20:15 -04001962 if (!pid) {
1963 strcpy(comm, "<idle>");
1964 return;
1965 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001966
Steven Rostedt74bf4072010-01-25 15:11:53 -05001967 if (WARN_ON_ONCE(pid < 0)) {
1968 strcpy(comm, "<XXX>");
1969 return;
1970 }
1971
Steven Rostedt4ca530852009-03-16 19:20:15 -04001972 if (pid > PID_MAX_DEFAULT) {
1973 strcpy(comm, "<...>");
1974 return;
1975 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001976
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001977 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001978 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001979 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001980 else
1981 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001982}
1983
1984void trace_find_cmdline(int pid, char comm[])
1985{
1986 preempt_disable();
1987 arch_spin_lock(&trace_cmdline_lock);
1988
1989 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001990
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001991 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001992 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001993}
1994
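/*
 * Illustrative sketch (not part of this file): callers resolve a recorded
 * pid back to a comm with a TASK_COMM_LEN buffer; pids with no saved
 * cmdline come back as "<...>".
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *	trace_seq_printf(s, "%s-%d", comm, entry->pid);
 */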
Ingo Molnare309b412008-05-12 21:20:51 +02001995void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001996{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001997 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001998 return;
1999
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002000 if (!__this_cpu_read(trace_cmdline_save))
2001 return;
2002
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002003 if (trace_save_cmdline(tsk))
2004 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002005}
2006
Steven Rostedt (VMware)af0009f2017-03-16 11:01:06 -04002007/*
2008 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2009 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2010 * simplifies those functions and keeps them in sync.
2011 */
2012enum print_line_t trace_handle_return(struct trace_seq *s)
2013{
2014 return trace_seq_has_overflowed(s) ?
2015 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2016}
2017EXPORT_SYMBOL_GPL(trace_handle_return);
2018
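/*
 * Illustrative sketch (not part of this file): a typical event output
 * callback writes into iter->seq and lets trace_handle_return() pick the
 * right return code.  The event and function names are hypothetical.
 *
 *	static enum print_line_t
 *	trace_example_print(struct trace_iterator *iter, int flags,
 *			    struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "example event\n");
 *		return trace_handle_return(&iter->seq);
 *	}
 */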
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03002019void
Steven Rostedt38697052008-10-01 13:14:09 -04002020tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2021 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002022{
2023 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002024
Steven Rostedt777e2082008-09-29 23:02:42 -04002025 entry->preempt_count = pc & 0xff;
2026 entry->pid = (tsk) ? tsk->pid : 0;
2027 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04002028#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04002029 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04002030#else
2031 TRACE_FLAG_IRQS_NOSUPPORT |
2032#endif
Peter Zijlstra7e6867b2016-03-18 16:28:04 +01002033 ((pc & NMI_MASK) ? TRACE_FLAG_NMI : 0) |
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002034 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
Pavankumar Kondetic59f29c2016-12-09 21:50:17 +05302035 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02002036 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2037 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002038}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02002039EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002040
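/*
 * Illustrative sketch (not part of this file): a writer that builds an
 * event by hand fills the common header from the current context before
 * adding its own fields.  The "irq_flags" variable is hypothetical; the
 * field and type names are the ones used elsewhere in this file.
 *
 *	struct trace_entry *ent = ring_buffer_event_data(event);
 *
 *	tracing_generic_entry_update(ent, irq_flags, preempt_count());
 *	ent->type = TRACE_FN;
 */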
Steven Rostedte77405a2009-09-02 14:17:06 -04002041struct ring_buffer_event *
2042trace_buffer_lock_reserve(struct ring_buffer *buffer,
2043 int type,
2044 unsigned long len,
2045 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002046{
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002047 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002048}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002049
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002050DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2051DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2052static int trace_buffered_event_ref;
2053
2054/**
2055 * trace_buffered_event_enable - enable buffering events
2056 *
2057 * When events are being filtered, it is quicker to use a temporary
2058 * buffer to write the event data into if there's a likely chance
2059 * that it will not be committed. The discard of the ring buffer
2060 * is not as fast as committing, and is much slower than copying
2061 * a commit.
2062 *
2063 * When an event is to be filtered, allocate per cpu buffers to
2064 * write the event data into, and if the event is filtered and discarded
2065 * it is simply dropped, otherwise, the entire data is to be committed
2066 * in one shot.
2067 */
2068void trace_buffered_event_enable(void)
2069{
2070 struct ring_buffer_event *event;
2071 struct page *page;
2072 int cpu;
2073
2074 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2075
2076 if (trace_buffered_event_ref++)
2077 return;
2078
2079 for_each_tracing_cpu(cpu) {
2080 page = alloc_pages_node(cpu_to_node(cpu),
2081 GFP_KERNEL | __GFP_NORETRY, 0);
2082 if (!page)
2083 goto failed;
2084
2085 event = page_address(page);
2086 memset(event, 0, sizeof(*event));
2087
2088 per_cpu(trace_buffered_event, cpu) = event;
2089
2090 preempt_disable();
2091 if (cpu == smp_processor_id() &&
2092 this_cpu_read(trace_buffered_event) !=
2093 per_cpu(trace_buffered_event, cpu))
2094 WARN_ON_ONCE(1);
2095 preempt_enable();
2096 }
2097
2098 return;
2099 failed:
2100 trace_buffered_event_disable();
2101}
2102
2103static void enable_trace_buffered_event(void *data)
2104{
2105 /* Probably not needed, but do it anyway */
2106 smp_rmb();
2107 this_cpu_dec(trace_buffered_event_cnt);
2108}
2109
2110static void disable_trace_buffered_event(void *data)
2111{
2112 this_cpu_inc(trace_buffered_event_cnt);
2113}
2114
2115/**
2116 * trace_buffered_event_disable - disable buffering events
2117 *
2118 * When a filter is removed, it is faster to not use the buffered
2119 * events, and to commit directly into the ring buffer. Free up
2120 * the temp buffers when there are no more users. This requires
2121 * special synchronization with current events.
2122 */
2123void trace_buffered_event_disable(void)
2124{
2125 int cpu;
2126
2127 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2128
2129 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2130 return;
2131
2132 if (--trace_buffered_event_ref)
2133 return;
2134
2135 preempt_disable();
2136 /* For each CPU, set the buffer as used. */
2137 smp_call_function_many(tracing_buffer_mask,
2138 disable_trace_buffered_event, NULL, 1);
2139 preempt_enable();
2140
2141 /* Wait for all current users to finish */
2142 synchronize_sched();
2143
2144 for_each_tracing_cpu(cpu) {
2145 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2146 per_cpu(trace_buffered_event, cpu) = NULL;
2147 }
2148 /*
2149 * Make sure trace_buffered_event is NULL before clearing
2150 * trace_buffered_event_cnt.
2151 */
2152 smp_wmb();
2153
2154 preempt_disable();
2155 /* Do the work on each cpu */
2156 smp_call_function_many(tracing_buffer_mask,
2157 enable_trace_buffered_event, NULL, 1);
2158 preempt_enable();
2159}
2160
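/*
 * Illustrative sketch (not part of this file): the event filter code
 * pairs these refcounted calls under event_mutex when a filter is
 * attached:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();
 *	mutex_unlock(&event_mutex);
 *
 * and, later, when the last filter goes away:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_disable();
 *	mutex_unlock(&event_mutex);
 */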
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002161static struct ring_buffer *temp_buffer;
2162
Steven Rostedtef5580d2009-02-27 19:38:04 -05002163struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04002164trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002165 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002166 int type, unsigned long len,
2167 unsigned long flags, int pc)
2168{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002169 struct ring_buffer_event *entry;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002170 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002171
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002172 *current_rb = trace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002173
2174 if ((trace_file->flags &
2175 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2176 (entry = this_cpu_read(trace_buffered_event))) {
2177 /* Try to use the per cpu buffer first */
2178 val = this_cpu_inc_return(trace_buffered_event_cnt);
2179 if (val == 1) {
2180 trace_event_setup(entry, type, flags, pc);
2181 entry->array[0] = len;
2182 return entry;
2183 }
2184 this_cpu_dec(trace_buffered_event_cnt);
2185 }
2186
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002187 entry = __trace_buffer_lock_reserve(*current_rb,
2188 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002189 /*
2190 * If tracing is off, but we have triggers enabled
2191 * we still need to look at the event data. Use the temp_buffer
 2192 * to store the trace event for the trigger to use. It's recursion
 2193 * safe and will not be recorded anywhere.
2194 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002195 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002196 *current_rb = temp_buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002197 entry = __trace_buffer_lock_reserve(*current_rb,
2198 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002199 }
2200 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002201}
2202EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2203
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002204static DEFINE_SPINLOCK(tracepoint_iter_lock);
2205static DEFINE_MUTEX(tracepoint_printk_mutex);
2206
2207static void output_printk(struct trace_event_buffer *fbuffer)
2208{
2209 struct trace_event_call *event_call;
2210 struct trace_event *event;
2211 unsigned long flags;
2212 struct trace_iterator *iter = tracepoint_print_iter;
2213
2214 /* We should never get here if iter is NULL */
2215 if (WARN_ON_ONCE(!iter))
2216 return;
2217
2218 event_call = fbuffer->trace_file->event_call;
2219 if (!event_call || !event_call->event.funcs ||
2220 !event_call->event.funcs->trace)
2221 return;
2222
2223 event = &fbuffer->trace_file->event_call->event;
2224
2225 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2226 trace_seq_init(&iter->seq);
2227 iter->ent = fbuffer->entry;
2228 event_call->event.funcs->trace(iter, 0, event);
2229 trace_seq_putc(&iter->seq, 0);
2230 printk("%s", iter->seq.buffer);
2231
2232 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2233}
2234
2235int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2236 void __user *buffer, size_t *lenp,
2237 loff_t *ppos)
2238{
2239 int save_tracepoint_printk;
2240 int ret;
2241
2242 mutex_lock(&tracepoint_printk_mutex);
2243 save_tracepoint_printk = tracepoint_printk;
2244
2245 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2246
2247 /*
2248 * This will force exiting early, as tracepoint_printk
2249 * is always zero when tracepoint_printk_iter is not allocated
 2250 * is always zero when tracepoint_print_iter is not allocated
2251 if (!tracepoint_print_iter)
2252 tracepoint_printk = 0;
2253
2254 if (save_tracepoint_printk == tracepoint_printk)
2255 goto out;
2256
2257 if (tracepoint_printk)
2258 static_key_enable(&tracepoint_printk_key.key);
2259 else
2260 static_key_disable(&tracepoint_printk_key.key);
2261
2262 out:
2263 mutex_unlock(&tracepoint_printk_mutex);
2264
2265 return ret;
2266}
2267
2268void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2269{
2270 if (static_key_false(&tracepoint_printk_key.key))
2271 output_printk(fbuffer);
2272
2273 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2274 fbuffer->event, fbuffer->entry,
2275 fbuffer->flags, fbuffer->pc);
2276}
2277EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2278
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002279void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2280 struct ring_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002281 struct ring_buffer_event *event,
2282 unsigned long flags, int pc,
2283 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002284{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002285 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002286
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002287 /*
2288 * If regs is not set, then skip the following callers:
2289 * trace_buffer_unlock_commit_regs
2290 * event_trigger_unlock_commit
2291 * trace_event_buffer_commit
2292 * trace_event_raw_event_sched_switch
2293 * Note, we can still get here via blktrace, wakeup tracer
2294 * and mmiotrace, but that's ok if they lose a function or
 2295 * two. They are not that meaningful.
2296 */
2297 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002298 ftrace_trace_userstack(buffer, flags, pc);
2299}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002300
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002301/*
2302 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2303 */
2304void
2305trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2306 struct ring_buffer_event *event)
2307{
2308 __buffer_unlock_commit(buffer, event);
2309}
2310
Chunyan Zhang478409d2016-11-21 15:57:18 +08002311static void
2312trace_process_export(struct trace_export *export,
2313 struct ring_buffer_event *event)
2314{
2315 struct trace_entry *entry;
2316 unsigned int size = 0;
2317
2318 entry = ring_buffer_event_data(event);
2319 size = ring_buffer_event_length(event);
2320 export->write(entry, size);
2321}
2322
2323static DEFINE_MUTEX(ftrace_export_lock);
2324
2325static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2326
2327static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2328
2329static inline void ftrace_exports_enable(void)
2330{
2331 static_branch_enable(&ftrace_exports_enabled);
2332}
2333
2334static inline void ftrace_exports_disable(void)
2335{
2336 static_branch_disable(&ftrace_exports_enabled);
2337}
2338
2339void ftrace_exports(struct ring_buffer_event *event)
2340{
2341 struct trace_export *export;
2342
2343 preempt_disable_notrace();
2344
2345 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2346 while (export) {
2347 trace_process_export(export, event);
2348 export = rcu_dereference_raw_notrace(export->next);
2349 }
2350
2351 preempt_enable_notrace();
2352}
2353
2354static inline void
2355add_trace_export(struct trace_export **list, struct trace_export *export)
2356{
2357 rcu_assign_pointer(export->next, *list);
2358 /*
2359 * We are entering export into the list but another
2360 * CPU might be walking that list. We need to make sure
2361 * the export->next pointer is valid before another CPU sees
2362 * the export pointer included into the list.
2363 */
2364 rcu_assign_pointer(*list, export);
2365}
2366
2367static inline int
2368rm_trace_export(struct trace_export **list, struct trace_export *export)
2369{
2370 struct trace_export **p;
2371
2372 for (p = list; *p != NULL; p = &(*p)->next)
2373 if (*p == export)
2374 break;
2375
2376 if (*p != export)
2377 return -1;
2378
2379 rcu_assign_pointer(*p, (*p)->next);
2380
2381 return 0;
2382}
2383
2384static inline void
2385add_ftrace_export(struct trace_export **list, struct trace_export *export)
2386{
2387 if (*list == NULL)
2388 ftrace_exports_enable();
2389
2390 add_trace_export(list, export);
2391}
2392
2393static inline int
2394rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2395{
2396 int ret;
2397
2398 ret = rm_trace_export(list, export);
2399 if (*list == NULL)
2400 ftrace_exports_disable();
2401
2402 return ret;
2403}
2404
2405int register_ftrace_export(struct trace_export *export)
2406{
2407 if (WARN_ON_ONCE(!export->write))
2408 return -1;
2409
2410 mutex_lock(&ftrace_export_lock);
2411
2412 add_ftrace_export(&ftrace_exports_list, export);
2413
2414 mutex_unlock(&ftrace_export_lock);
2415
2416 return 0;
2417}
2418EXPORT_SYMBOL_GPL(register_ftrace_export);
2419
2420int unregister_ftrace_export(struct trace_export *export)
2421{
2422 int ret;
2423
2424 mutex_lock(&ftrace_export_lock);
2425
2426 ret = rm_ftrace_export(&ftrace_exports_list, export);
2427
2428 mutex_unlock(&ftrace_export_lock);
2429
2430 return ret;
2431}
2432EXPORT_SYMBOL_GPL(unregister_ftrace_export);
2433
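/*
 * Illustrative sketch (not part of this file): a minimal exporter that
 * receives every exported function trace entry.  The names below are
 * hypothetical, and the write() signature is assumed from how
 * trace_process_export() above passes the entry and its length.
 *
 *	static void example_export_write(const void *entry, unsigned int size)
 *	{
 *		(push the entry bytes to some transport here)
 *	}
 *
 *	static struct trace_export example_export = {
 *		.write	= example_export_write,
 *	};
 *
 *	register_ftrace_export(&example_export);
 *	(and unregister_ftrace_export(&example_export) on teardown)
 */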
Ingo Molnare309b412008-05-12 21:20:51 +02002434void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002435trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04002436 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2437 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002438{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002439 struct trace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002440 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002441 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002442 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002443
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002444 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2445 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002446 if (!event)
2447 return;
2448 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002449 entry->ip = ip;
2450 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002451
Chunyan Zhang478409d2016-11-21 15:57:18 +08002452 if (!call_filter_check_discard(call, entry, buffer, event)) {
2453 if (static_branch_unlikely(&ftrace_exports_enabled))
2454 ftrace_exports(event);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002455 __buffer_unlock_commit(buffer, event);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002456 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002457}
2458
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002459#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002460
2461#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2462struct ftrace_stack {
2463 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2464};
2465
2466static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2467static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2468
Steven Rostedte77405a2009-09-02 14:17:06 -04002469static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05002470 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002471 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02002472{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002473 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002474 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002475 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02002476 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002477 int use_stack;
2478 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02002479
2480 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02002481 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02002482
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002483 /*
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002484 * Add two, for this function and the call to save_stack_trace()
2485 * If regs is set, then these functions will not be in the way.
2486 */
2487 if (!regs)
2488 trace.skip += 2;
2489
2490 /*
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002491 * Since events can happen in NMIs there's no safe way to
2492 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2493 * or NMI comes in, it will just have to use the default
2494 * FTRACE_STACK_SIZE.
2495 */
2496 preempt_disable_notrace();
2497
Shan Wei82146522012-11-19 13:21:01 +08002498 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002499 /*
2500 * We don't need any atomic variables, just a barrier.
2501 * If an interrupt comes in, we don't care, because it would
2502 * have exited and put the counter back to what we want.
2503 * We just need a barrier to keep gcc from moving things
2504 * around.
2505 */
2506 barrier();
2507 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05002508 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002509 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2510
2511 if (regs)
2512 save_stack_trace_regs(regs, &trace);
2513 else
2514 save_stack_trace(&trace);
2515
2516 if (trace.nr_entries > size)
2517 size = trace.nr_entries;
2518 } else
2519 /* From now on, use_stack is a boolean */
2520 use_stack = 0;
2521
2522 size *= sizeof(unsigned long);
2523
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002524 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2525 sizeof(*entry) + size, flags, pc);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002526 if (!event)
2527 goto out;
2528 entry = ring_buffer_event_data(event);
2529
2530 memset(&entry->caller, 0, size);
2531
2532 if (use_stack)
2533 memcpy(&entry->caller, trace.entries,
2534 trace.nr_entries * sizeof(unsigned long));
2535 else {
2536 trace.max_entries = FTRACE_STACK_ENTRIES;
2537 trace.entries = entry->caller;
2538 if (regs)
2539 save_stack_trace_regs(regs, &trace);
2540 else
2541 save_stack_trace(&trace);
2542 }
2543
2544 entry->size = trace.nr_entries;
2545
Tom Zanussif306cc82013-10-24 08:34:17 -05002546 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002547 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002548
2549 out:
2550 /* Again, don't let gcc optimize things here */
2551 barrier();
Shan Wei82146522012-11-19 13:21:01 +08002552 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002553 preempt_enable_notrace();
2554
Ingo Molnarf0a920d2008-05-12 21:20:47 +02002555}
2556
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002557static inline void ftrace_trace_stack(struct trace_array *tr,
2558 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002559 unsigned long flags,
2560 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05002561{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002562 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05002563 return;
2564
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002565 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05002566}
2567
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002568void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2569 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04002570{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002571 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04002572}
2573
Steven Rostedt03889382009-12-11 09:48:22 -05002574/**
2575 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002576 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05002577 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002578void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05002579{
2580 unsigned long flags;
2581
2582 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05002583 return;
Steven Rostedt03889382009-12-11 09:48:22 -05002584
2585 local_save_flags(flags);
2586
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002587 /*
 2588 * Skip 3 more, which seems to get us to the caller of
 2589 * this function.
2590 */
2591 skip += 3;
2592 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2593 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05002594}
2595
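/*
 * Illustrative sketch (not part of this file): drop a backtrace into the
 * trace buffer at a suspect point; a skip of 0 starts the trace at the
 * caller.  The "bad_state" condition is hypothetical.
 *
 *	if (unlikely(bad_state))
 *		trace_dump_stack(0);
 */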
Steven Rostedt91e86e52010-11-10 12:56:12 +01002596static DEFINE_PER_CPU(int, user_stack_count);
2597
Steven Rostedte77405a2009-09-02 14:17:06 -04002598void
2599ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02002600{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002601 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02002602 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02002603 struct userstack_entry *entry;
2604 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02002605
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002606 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02002607 return;
2608
Steven Rostedtb6345872010-03-12 20:03:30 -05002609 /*
 2610 * NMIs cannot handle page faults, even with fixups.
 2611 * Saving the user stack can (and often does) fault.
2612 */
2613 if (unlikely(in_nmi()))
2614 return;
2615
Steven Rostedt91e86e52010-11-10 12:56:12 +01002616 /*
2617 * prevent recursion, since the user stack tracing may
2618 * trigger other kernel events.
2619 */
2620 preempt_disable();
2621 if (__this_cpu_read(user_stack_count))
2622 goto out;
2623
2624 __this_cpu_inc(user_stack_count);
2625
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002626 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2627 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02002628 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08002629 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02002630 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02002631
Steven Rostedt48659d32009-09-11 11:36:23 -04002632 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02002633 memset(&entry->caller, 0, sizeof(entry->caller));
2634
2635 trace.nr_entries = 0;
2636 trace.max_entries = FTRACE_STACK_ENTRIES;
2637 trace.skip = 0;
2638 trace.entries = entry->caller;
2639
2640 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05002641 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002642 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002643
Li Zefan1dbd1952010-12-09 15:47:56 +08002644 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01002645 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002646 out:
2647 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02002648}
2649
Hannes Eder4fd27352009-02-10 19:44:12 +01002650#ifdef UNUSED
2651static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02002652{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002653 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02002654}
Hannes Eder4fd27352009-02-10 19:44:12 +01002655#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02002656
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002657#endif /* CONFIG_STACKTRACE */
2658
Steven Rostedt07d777f2011-09-22 14:01:55 -04002659/* created for use with alloc_percpu */
2660struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002661 int nesting;
2662 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04002663};
2664
2665static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002666
2667/*
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002668 * This allows for lockless recording. If we're nested too deeply, then
2669 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04002670 */
2671static char *get_trace_buf(void)
2672{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002673 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002674
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002675 if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002676 return NULL;
2677
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002678 return &buffer->buffer[buffer->nesting++][0];
2679}
2680
2681static void put_trace_buf(void)
2682{
2683 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002684}
2685
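/*
 * Illustrative sketch (not part of this file): users of the per-cpu
 * printk buffers bracket the buffer with preemption disabled, exactly as
 * trace_vbprintk() below does.
 *
 *	preempt_disable_notrace();
 *	buf = get_trace_buf();
 *	if (buf) {
 *		(format into buf, at most TRACE_BUF_SIZE bytes)
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 */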
2686static int alloc_percpu_trace_buffer(void)
2687{
2688 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002689
2690 buffers = alloc_percpu(struct trace_buffer_struct);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002691 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2692 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002693
2694 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002695 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002696}
2697
Steven Rostedt81698832012-10-11 10:15:05 -04002698static int buffers_allocated;
2699
Steven Rostedt07d777f2011-09-22 14:01:55 -04002700void trace_printk_init_buffers(void)
2701{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002702 if (buffers_allocated)
2703 return;
2704
2705 if (alloc_percpu_trace_buffer())
2706 return;
2707
Steven Rostedt2184db42014-05-28 13:14:40 -04002708 /* trace_printk() is for debug use only. Don't use it in production. */
2709
Joe Perchesa395d6a2016-03-22 14:28:09 -07002710 pr_warn("\n");
2711 pr_warn("**********************************************************\n");
2712 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2713 pr_warn("** **\n");
2714 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2715 pr_warn("** **\n");
2716 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2717 pr_warn("** unsafe for production use. **\n");
2718 pr_warn("** **\n");
2719 pr_warn("** If you see this message and you are not debugging **\n");
2720 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2721 pr_warn("** **\n");
2722 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2723 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002724
Steven Rostedtb382ede62012-10-10 21:44:34 -04002725 /* Expand the buffers to set size */
2726 tracing_update_buffers();
2727
Steven Rostedt07d777f2011-09-22 14:01:55 -04002728 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002729
2730 /*
2731 * trace_printk_init_buffers() can be called by modules.
2732 * If that happens, then we need to start cmdline recording
2733 * directly here. If the global_trace.buffer is already
2734 * allocated here, then this was called by module code.
2735 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002736 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002737 tracing_start_cmdline_record();
2738}
2739
2740void trace_printk_start_comm(void)
2741{
2742 /* Start tracing comms if trace printk is set */
2743 if (!buffers_allocated)
2744 return;
2745 tracing_start_cmdline_record();
2746}
2747
2748static void trace_printk_start_stop_comm(int enabled)
2749{
2750 if (!buffers_allocated)
2751 return;
2752
2753 if (enabled)
2754 tracing_start_cmdline_record();
2755 else
2756 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002757}
2758
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002759/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002760 * trace_vbprintk - write binary msg to tracing buffer
 * @ip: The address of the caller, recorded in the entry's ip field
 * @fmt: The format string; only its pointer is stored in the entry
 * @args: Arguments for @fmt, packed into binary words by vbin_printf()
2762 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002763int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002764{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002765 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002766 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002767 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002768 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002769 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002770 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002771 char *tbuffer;
2772 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002773
2774 if (unlikely(tracing_selftest_running || tracing_disabled))
2775 return 0;
2776
2777 /* Don't pollute graph traces with trace_vprintk internals */
2778 pause_graph_tracing();
2779
2780 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002781 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002782
Steven Rostedt07d777f2011-09-22 14:01:55 -04002783 tbuffer = get_trace_buf();
2784 if (!tbuffer) {
2785 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002786 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002787 }
2788
2789 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2790
2791 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002792 goto out;
2793
Steven Rostedt07d777f2011-09-22 14:01:55 -04002794 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002795 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002796 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002797 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2798 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002799 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002800 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002801 entry = ring_buffer_event_data(event);
2802 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002803 entry->fmt = fmt;
2804
Steven Rostedt07d777f2011-09-22 14:01:55 -04002805 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002806 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002807 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002808 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05002809 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002810
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002811out:
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002812 put_trace_buf();
2813
2814out_nobuffer:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002815 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002816 unpause_graph_tracing();
2817
2818 return len;
2819}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002820EXPORT_SYMBOL_GPL(trace_vbprintk);
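/*
 * Note on the "b" (binary) variant: only the fmt pointer and the
 * vbin_printf()-packed argument words are stored in the ring buffer;
 * the final string is rendered at read time by the bprint event
 * handler. This keeps the tracing fast path cheaper than formatting
 * the whole string up front, as __trace_array_vprintk() below does.
 */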
2821
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002822static int
2823__trace_array_vprintk(struct ring_buffer *buffer,
2824 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002825{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002826 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002827 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002828 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002829 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002830 unsigned long flags;
2831 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002832
2833 if (tracing_disabled || tracing_selftest_running)
2834 return 0;
2835
Steven Rostedt07d777f2011-09-22 14:01:55 -04002836 /* Don't pollute graph traces with trace_vprintk internals */
2837 pause_graph_tracing();
2838
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002839 pc = preempt_count();
2840 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002841
2843 tbuffer = get_trace_buf();
2844 if (!tbuffer) {
2845 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002846 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002847 }
2848
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002849 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002850
Steven Rostedt07d777f2011-09-22 14:01:55 -04002851 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002852 size = sizeof(*entry) + len + 1;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002853 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2854 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002855 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002856 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002857 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002858 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002859
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002860 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05002861 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002862 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002863 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05002864 }
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002865
2866out:
2867 put_trace_buf();
2868
2869out_nobuffer:
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002870 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002871 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002872
2873 return len;
2874}
Steven Rostedt659372d2009-09-03 19:11:07 -04002875
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002876int trace_array_vprintk(struct trace_array *tr,
2877 unsigned long ip, const char *fmt, va_list args)
2878{
2879 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2880}
2881
2882int trace_array_printk(struct trace_array *tr,
2883 unsigned long ip, const char *fmt, ...)
2884{
2885 int ret;
2886 va_list ap;
2887
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002888 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002889 return 0;
2890
2891 va_start(ap, fmt);
2892 ret = trace_array_vprintk(tr, ip, fmt, ap);
2893 va_end(ap);
2894 return ret;
2895}
2896
2897int trace_array_printk_buf(struct ring_buffer *buffer,
2898 unsigned long ip, const char *fmt, ...)
2899{
2900 int ret;
2901 va_list ap;
2902
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002903 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002904 return 0;
2905
2906 va_start(ap, fmt);
2907 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2908 va_end(ap);
2909 return ret;
2910}
2911
Steven Rostedt659372d2009-09-03 19:11:07 -04002912int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2913{
Steven Rostedta813a152009-10-09 01:41:35 -04002914 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002915}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002916EXPORT_SYMBOL_GPL(trace_vprintk);
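/*
 * Illustrative use of the trace_array printk API from code that owns
 * its own trace instance ("my_tr" is a hypothetical handle obtained
 * elsewhere):
 *
 *	trace_array_printk(my_tr, _THIS_IP_, "reached state %d\n", state);
 *
 * A plain trace_printk() ends up in trace_vbprintk() or trace_vprintk()
 * above and therefore always targets the global trace array.
 */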
2917
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002918static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002919{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002920 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2921
Steven Rostedt5a90f572008-09-03 17:42:51 -04002922 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002923 if (buf_iter)
2924 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002925}
2926
Ingo Molnare309b412008-05-12 21:20:51 +02002927static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002928peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2929 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002930{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002931 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002932 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002933
Steven Rostedtd7690412008-10-01 00:29:53 -04002934 if (buf_iter)
2935 event = ring_buffer_iter_peek(buf_iter, ts);
2936 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002937 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002938 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002939
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002940 if (event) {
2941 iter->ent_size = ring_buffer_event_length(event);
2942 return ring_buffer_event_data(event);
2943 }
2944 iter->ent_size = 0;
2945 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002946}
Steven Rostedtd7690412008-10-01 00:29:53 -04002947
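/*
 * __find_next_entry() below is in essence a k-way merge step: it peeks
 * at the head of every per-cpu ring buffer (or at just one CPU, for
 * per_cpu trace files) and returns the entry with the smallest
 * timestamp, so the merged output stays globally time ordered.
 */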
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002948static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002949__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2950 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002951{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002952 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002953 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002954 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002955 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002956 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002957 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002958 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002959 int cpu;
2960
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002961 /*
2962	 * If we are in a per_cpu trace file, don't bother iterating over
2963	 * all CPUs; peek directly at that one CPU.
2964 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002965 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002966 if (ring_buffer_empty_cpu(buffer, cpu_file))
2967 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002968 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002969 if (ent_cpu)
2970 *ent_cpu = cpu_file;
2971
2972 return ent;
2973 }
2974
Steven Rostedtab464282008-05-12 21:21:00 +02002975 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002976
2977 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002978 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002979
Steven Rostedtbc21b472010-03-31 19:49:26 -04002980 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002981
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02002982 /*
2983 * Pick the entry with the smallest timestamp:
2984 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002985 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002986 next = ent;
2987 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002988 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002989 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002990 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002991 }
2992 }
2993
Steven Rostedt12b5da32012-03-27 10:43:28 -04002994 iter->ent_size = next_size;
2995
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002996 if (ent_cpu)
2997 *ent_cpu = next_cpu;
2998
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002999 if (ent_ts)
3000 *ent_ts = next_ts;
3001
Steven Rostedtbc21b472010-03-31 19:49:26 -04003002 if (missing_events)
3003 *missing_events = next_lost;
3004
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003005 return next;
3006}
3007
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003008/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003009struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3010 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003011{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003012 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003013}
Ingo Molnar8c523a92008-05-12 21:20:46 +02003014
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003015/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05003016void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003017{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003018 iter->ent = __find_next_entry(iter, &iter->cpu,
3019 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003020
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003021 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003022 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003023
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003024 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003025}
3026
Ingo Molnare309b412008-05-12 21:20:51 +02003027static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003028{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003029 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003030 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003031}
3032
Ingo Molnare309b412008-05-12 21:20:51 +02003033static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003034{
3035 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003036 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003037 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003038
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003039 WARN_ON_ONCE(iter->leftover);
3040
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003041 (*pos)++;
3042
3043 /* can't go backwards */
3044 if (iter->idx > i)
3045 return NULL;
3046
3047 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05003048 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003049 else
3050 ent = iter;
3051
3052 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05003053 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003054
3055 iter->pos = *pos;
3056
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003057 return ent;
3058}
3059
Jason Wessel955b61e2010-08-05 09:22:23 -05003060void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003061{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003062 struct ring_buffer_event *event;
3063 struct ring_buffer_iter *buf_iter;
3064 unsigned long entries = 0;
3065 u64 ts;
3066
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003067 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003068
Steven Rostedt6d158a82012-06-27 20:46:14 -04003069 buf_iter = trace_buffer_iter(iter, cpu);
3070 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003071 return;
3072
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003073 ring_buffer_iter_reset(buf_iter);
3074
3075 /*
3076	 * With the max latency tracers, a reset may never have taken
3077	 * place on a cpu. This is evident when a timestamp is before
3078	 * the start of the buffer.
3079 */
3080 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003081 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003082 break;
3083 entries++;
3084 ring_buffer_read(buf_iter, NULL);
3085 }
3086
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003087 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003088}
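/*
 * The skipped_entries count recorded above lets get_total_entries()
 * report only the records inside the displayed time window, rather
 * than also counting stale entries that predate the last reset of the
 * max latency buffers.
 */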
3089
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003090/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003091 * The current tracer is copied to avoid taking a global lock
3092 * all around.
3093 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003094static void *s_start(struct seq_file *m, loff_t *pos)
3095{
3096 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003097 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003098 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003099 void *p = NULL;
3100 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003101 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003102
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09003103 /*
3104 * copy the tracer to avoid using a global lock all around.
3105	 * iter->trace is a copy of current_trace; the name pointer may be
3106	 * compared directly instead of using strcmp(), as iter->trace->name
3107	 * points to the same string as current_trace->name.
3108 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003109 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003110 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3111 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003112 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003113
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003114#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003115 if (iter->snapshot && iter->trace->use_max_tr)
3116 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003117#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003118
3119 if (!iter->snapshot)
3120 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003121
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003122 if (*pos != iter->pos) {
3123 iter->ent = NULL;
3124 iter->cpu = 0;
3125 iter->idx = -1;
3126
Steven Rostedtae3b5092013-01-23 15:22:59 -05003127 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003128 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003129 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003130 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003131 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003132
Lai Jiangshanac91d852010-03-02 17:54:50 +08003133 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003134 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3135 ;
3136
3137 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003138 /*
3139 * If we overflowed the seq_file before, then we want
3140 * to just reuse the trace_seq buffer again.
3141 */
3142 if (iter->leftover)
3143 p = iter;
3144 else {
3145 l = *pos - 1;
3146 p = s_next(m, p, &l);
3147 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003148 }
3149
Lai Jiangshan4f535962009-05-18 19:35:34 +08003150 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003151 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003152 return p;
3153}
3154
3155static void s_stop(struct seq_file *m, void *p)
3156{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003157 struct trace_iterator *iter = m->private;
3158
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003159#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003160 if (iter->snapshot && iter->trace->use_max_tr)
3161 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003162#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003163
3164 if (!iter->snapshot)
3165 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003166
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003167 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08003168 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003169}
3170
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003171static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003172get_total_entries(struct trace_buffer *buf,
3173 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003174{
3175 unsigned long count;
3176 int cpu;
3177
3178 *total = 0;
3179 *entries = 0;
3180
3181 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003182 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003183 /*
3184 * If this buffer has skipped entries, then we hold all
3185 * entries for the trace and we need to ignore the
3186 * ones before the time stamp.
3187 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003188 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3189 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003190 /* total is the same as the entries */
3191 *total += count;
3192 } else
3193 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003194 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003195 *entries += count;
3196 }
3197}
3198
Ingo Molnare309b412008-05-12 21:20:51 +02003199static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003200{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003201 seq_puts(m, "# _------=> CPU# \n"
3202 "# / _-----=> irqs-off \n"
3203 "# | / _----=> need-resched \n"
3204 "# || / _---=> hardirq/softirq \n"
3205 "# ||| / _--=> preempt-depth \n"
3206 "# |||| / delay \n"
3207 "# cmd pid ||||| time | caller \n"
3208 "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003209}
3210
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003211static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003212{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003213 unsigned long total;
3214 unsigned long entries;
3215
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003216 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003217 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3218 entries, total, num_online_cpus());
3219 seq_puts(m, "#\n");
3220}
3221
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003222static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003223{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003224 print_event_info(buf, m);
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003225 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
3226 "# | | | | |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003227}
3228
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003229static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05003230{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003231 print_event_info(buf, m);
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003232 seq_puts(m, "# _-----=> irqs-off\n"
3233 "# / _----=> need-resched\n"
3234 "# | / _---=> hardirq/softirq\n"
3235 "# || / _--=> preempt-depth\n"
3236 "# ||| / delay\n"
3237 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
3238 "# | | | |||| | |\n");
Steven Rostedt77271ce2011-11-17 09:34:33 -05003239}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003240
Jiri Olsa62b915f2010-04-02 19:01:22 +02003241void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003242print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3243{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003244 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003245 struct trace_buffer *buf = iter->trace_buffer;
3246 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003247 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003248 unsigned long entries;
3249 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003250 const char *name = "preemption";
3251
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05003252 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003253
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003254 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003255
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003256 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003257 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003258 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003259 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003260 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003261 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02003262 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003263 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02003264 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003265 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003266#if defined(CONFIG_PREEMPT_NONE)
3267 "server",
3268#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3269 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04003270#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003271 "preempt",
3272#else
3273 "unknown",
3274#endif
3275 /* These are reserved for later use */
3276 0, 0, 0, 0);
3277#ifdef CONFIG_SMP
3278 seq_printf(m, " #P:%d)\n", num_online_cpus());
3279#else
3280 seq_puts(m, ")\n");
3281#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003282 seq_puts(m, "# -----------------\n");
3283 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003284 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07003285 data->comm, data->pid,
3286 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003287 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003288 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003289
3290 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003291 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003292 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3293 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003294 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003295 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3296 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04003297 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003298 }
3299
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003300 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003301}
3302
Steven Rostedta3097202008-11-07 22:36:02 -05003303static void test_cpu_buff_start(struct trace_iterator *iter)
3304{
3305 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003306 struct trace_array *tr = iter->tr;
Steven Rostedta3097202008-11-07 22:36:02 -05003307
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003308 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003309 return;
3310
3311 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3312 return;
3313
Sasha Levin919cd972015-09-04 12:45:56 -04003314 if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05003315 return;
3316
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003317 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003318 return;
3319
Sasha Levin919cd972015-09-04 12:45:56 -04003320 if (iter->started)
3321 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003322
3323	/* Don't print the "buffer started" line for the first entry of the trace */
3324 if (iter->idx > 1)
3325 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3326 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05003327}
3328
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003329static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003330{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003331 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02003332 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003333 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003334 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003335 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003336
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003337 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003338
Steven Rostedta3097202008-11-07 22:36:02 -05003339 test_cpu_buff_start(iter);
3340
Steven Rostedtf633cef2008-12-23 23:24:13 -05003341 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003342
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003343 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003344 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3345 trace_print_lat_context(iter);
3346 else
3347 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003348 }
3349
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003350 if (trace_seq_has_overflowed(s))
3351 return TRACE_TYPE_PARTIAL_LINE;
3352
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003353 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003354 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003355
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003356 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003357
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003358 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003359}
3360
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003361static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003362{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003363 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003364 struct trace_seq *s = &iter->seq;
3365 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003366 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003367
3368 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003369
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003370 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003371 trace_seq_printf(s, "%d %d %llu ",
3372 entry->pid, iter->cpu, iter->ts);
3373
3374 if (trace_seq_has_overflowed(s))
3375 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003376
Steven Rostedtf633cef2008-12-23 23:24:13 -05003377 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003378 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003379 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003380
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003381 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003382
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003383 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003384}
3385
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003386static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003387{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003388 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003389 struct trace_seq *s = &iter->seq;
3390 unsigned char newline = '\n';
3391 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003392 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003393
3394 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003395
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003396 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003397 SEQ_PUT_HEX_FIELD(s, entry->pid);
3398 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3399 SEQ_PUT_HEX_FIELD(s, iter->ts);
3400 if (trace_seq_has_overflowed(s))
3401 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003402 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003403
Steven Rostedtf633cef2008-12-23 23:24:13 -05003404 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003405 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04003406 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003407 if (ret != TRACE_TYPE_HANDLED)
3408 return ret;
3409 }
Steven Rostedt7104f302008-10-01 10:52:51 -04003410
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003411 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003412
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003413 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003414}
3415
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003416static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003417{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003418 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003419 struct trace_seq *s = &iter->seq;
3420 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003421 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003422
3423 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003424
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003425 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003426 SEQ_PUT_FIELD(s, entry->pid);
3427 SEQ_PUT_FIELD(s, iter->cpu);
3428 SEQ_PUT_FIELD(s, iter->ts);
3429 if (trace_seq_has_overflowed(s))
3430 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003431 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003432
Steven Rostedtf633cef2008-12-23 23:24:13 -05003433 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04003434 return event ? event->funcs->binary(iter, 0, event) :
3435 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003436}
3437
Jiri Olsa62b915f2010-04-02 19:01:22 +02003438int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003439{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003440 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003441 int cpu;
3442
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003443 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003444 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003445 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003446 buf_iter = trace_buffer_iter(iter, cpu);
3447 if (buf_iter) {
3448 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003449 return 0;
3450 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003451 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003452 return 0;
3453 }
3454 return 1;
3455 }
3456
Steven Rostedtab464282008-05-12 21:21:00 +02003457 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04003458 buf_iter = trace_buffer_iter(iter, cpu);
3459 if (buf_iter) {
3460 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04003461 return 0;
3462 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003463 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04003464 return 0;
3465 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003466 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003467
Frederic Weisbecker797d3712008-09-30 18:13:45 +02003468 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003469}
3470
Lai Jiangshan4f535962009-05-18 19:35:34 +08003471/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05003472enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003473{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003474 struct trace_array *tr = iter->tr;
3475 unsigned long trace_flags = tr->trace_flags;
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003476 enum print_line_t ret;
3477
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003478 if (iter->lost_events) {
3479 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3480 iter->cpu, iter->lost_events);
3481 if (trace_seq_has_overflowed(&iter->seq))
3482 return TRACE_TYPE_PARTIAL_LINE;
3483 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04003484
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003485 if (iter->trace && iter->trace->print_line) {
3486 ret = iter->trace->print_line(iter);
3487 if (ret != TRACE_TYPE_UNHANDLED)
3488 return ret;
3489 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02003490
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05003491 if (iter->ent->type == TRACE_BPUTS &&
3492 trace_flags & TRACE_ITER_PRINTK &&
3493 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3494 return trace_print_bputs_msg_only(iter);
3495
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003496 if (iter->ent->type == TRACE_BPRINT &&
3497 trace_flags & TRACE_ITER_PRINTK &&
3498 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003499 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003500
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003501 if (iter->ent->type == TRACE_PRINT &&
3502 trace_flags & TRACE_ITER_PRINTK &&
3503 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003504 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003505
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003506 if (trace_flags & TRACE_ITER_BIN)
3507 return print_bin_fmt(iter);
3508
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003509 if (trace_flags & TRACE_ITER_HEX)
3510 return print_hex_fmt(iter);
3511
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003512 if (trace_flags & TRACE_ITER_RAW)
3513 return print_raw_fmt(iter);
3514
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003515 return print_trace_fmt(iter);
3516}
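/*
 * Dispatch order above, for reference: after the lost-events notice, a
 * tracer-supplied print_line() callback gets first claim; then the
 * PRINTK_MSGONLY shortcuts for bputs/bprint/print entries; then the
 * bin/hex/raw output modes selected via trace_flags; and finally the
 * default formatted output of print_trace_fmt().
 */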
3517
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003518void trace_latency_header(struct seq_file *m)
3519{
3520 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003521 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003522
3523 /* print nothing if the buffers are empty */
3524 if (trace_empty(iter))
3525 return;
3526
3527 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3528 print_trace_header(m, iter);
3529
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003530 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003531 print_lat_help_header(m);
3532}
3533
Jiri Olsa62b915f2010-04-02 19:01:22 +02003534void trace_default_header(struct seq_file *m)
3535{
3536 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003537 struct trace_array *tr = iter->tr;
3538 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02003539
Jiri Olsaf56e7f82011-06-03 16:58:49 +02003540 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3541 return;
3542
Jiri Olsa62b915f2010-04-02 19:01:22 +02003543 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3544 /* print nothing if the buffers are empty */
3545 if (trace_empty(iter))
3546 return;
3547 print_trace_header(m, iter);
3548 if (!(trace_flags & TRACE_ITER_VERBOSE))
3549 print_lat_help_header(m);
3550 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05003551 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3552 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003553 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003554 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003555 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003556 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02003557 }
3558}
3559
Steven Rostedte0a413f2011-09-29 21:26:16 -04003560static void test_ftrace_alive(struct seq_file *m)
3561{
3562 if (!ftrace_is_dead())
3563 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003564 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3565 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04003566}
3567
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003568#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003569static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003570{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003571 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3572 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3573 "# Takes a snapshot of the main buffer.\n"
3574 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3575 "# (Doesn't have to be '2' works with any number that\n"
3576 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003577}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003578
3579static void show_snapshot_percpu_help(struct seq_file *m)
3580{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003581 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003582#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003583 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3584 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003585#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003586 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3587 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003588#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003589 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3590 "# (Doesn't have to be '2' works with any number that\n"
3591 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003592}
3593
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003594static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3595{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05003596 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003597 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003598 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003599 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003600
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003601 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003602 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3603 show_snapshot_main_help(m);
3604 else
3605 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003606}
3607#else
3608/* Should never be called */
3609static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3610#endif
3611
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003612static int s_show(struct seq_file *m, void *v)
3613{
3614 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003615 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003616
3617 if (iter->ent == NULL) {
3618 if (iter->tr) {
3619 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3620 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04003621 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003622 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003623 if (iter->snapshot && trace_empty(iter))
3624 print_snapshot_help(m, iter);
3625 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003626 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02003627 else
3628 trace_default_header(m);
3629
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003630 } else if (iter->leftover) {
3631 /*
3632 * If we filled the seq_file buffer earlier, we
3633 * want to just show it now.
3634 */
3635 ret = trace_print_seq(m, &iter->seq);
3636
3637 /* ret should this time be zero, but you never know */
3638 iter->leftover = ret;
3639
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003640 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003641 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003642 ret = trace_print_seq(m, &iter->seq);
3643 /*
3644 * If we overflow the seq_file buffer, then it will
3645 * ask us for this data again at start up.
3646 * Use that instead.
3647 * ret is 0 if seq_file write succeeded.
3648 * -1 otherwise.
3649 */
3650 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003651 }
3652
3653 return 0;
3654}
3655
Oleg Nesterov649e9c702013-07-23 17:25:54 +02003656/*
3657 * Should be used after trace_array_get(); trace_types_lock
3658 * ensures that i_cdev was already initialized.
3659 */
3660static inline int tracing_get_cpu(struct inode *inode)
3661{
3662 if (inode->i_cdev) /* See trace_create_cpu_file() */
3663 return (long)inode->i_cdev - 1;
3664 return RING_BUFFER_ALL_CPUS;
3665}
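/*
 * Example of the i_cdev encoding (set up by trace_create_cpu_file()):
 * the per-cpu file for CPU 2 stores (void *)(2 + 1) in i_cdev, so the
 * subtraction above yields 2, while a NULL i_cdev (a top-level file)
 * falls through to RING_BUFFER_ALL_CPUS.
 */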
3666
James Morris88e9d342009-09-22 16:43:43 -07003667static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003668 .start = s_start,
3669 .next = s_next,
3670 .stop = s_stop,
3671 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003672};
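/*
 * The seq_file core drives these callbacks roughly as follows
 * (simplified sketch; error and overflow handling omitted):
 *
 *	p = s_start(m, &pos);
 *	while (p) {
 *		s_show(m, p);
 *		p = s_next(m, p, &pos);
 *	}
 *	s_stop(m, p);
 */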
3673
Ingo Molnare309b412008-05-12 21:20:51 +02003674static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02003675__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003676{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003677 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003678 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02003679 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003680
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003681 if (tracing_disabled)
3682 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02003683
Jiri Olsa50e18b92012-04-25 10:23:39 +02003684 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003685 if (!iter)
3686 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003687
Gil Fruchter72917232015-06-09 10:32:35 +03003688 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
Steven Rostedt6d158a82012-06-27 20:46:14 -04003689 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003690 if (!iter->buffer_iter)
3691 goto release;
3692
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003693 /*
3694 * We make a copy of the current tracer to avoid concurrent
3695 * changes on it while we are reading.
3696 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003697 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003698 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003699 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003700 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003701
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003702 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003703
Li Zefan79f55992009-06-15 14:58:26 +08003704 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003705 goto fail;
3706
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003707 iter->tr = tr;
3708
3709#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003710 /* Currently only the top directory has a snapshot */
3711 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003712 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003713 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003714#endif
3715 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003716 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003717 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02003718 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003719 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003720
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003721 /* Notify the tracer early; before we stop tracing. */
3722 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01003723 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003724
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003725 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003726 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003727 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3728
David Sharp8be07092012-11-13 12:18:22 -08003729 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09003730 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08003731 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3732
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003733 /* stop the trace while dumping if we are not opening "snapshot" */
3734 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003735 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003736
Steven Rostedtae3b5092013-01-23 15:22:59 -05003737 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003738 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003739 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003740 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003741 }
3742 ring_buffer_read_prepare_sync();
3743 for_each_tracing_cpu(cpu) {
3744 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003745 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003746 }
3747 } else {
3748 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003749 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003750 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003751 ring_buffer_read_prepare_sync();
3752 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003753 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003754 }
3755
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003756 mutex_unlock(&trace_types_lock);
3757
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003758 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003759
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003760 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003761 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003762 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003763 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003764release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02003765 seq_release_private(inode, file);
3766 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003767}
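/*
 * Note the buffer selection in __tracing_open() above: latency tracers
 * (print_max) and the snapshot file read from tr->max_buffer, while
 * everything else reads the live tr->trace_buffer; opening anything but
 * the snapshot file also stops tracing for the duration of the read so
 * the iterator sees a stable buffer.
 */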
3768
3769int tracing_open_generic(struct inode *inode, struct file *filp)
3770{
Steven Rostedt60a11772008-05-12 21:20:44 +02003771 if (tracing_disabled)
3772 return -ENODEV;
3773
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003774 filp->private_data = inode->i_private;
3775 return 0;
3776}
3777
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003778bool tracing_is_disabled(void)
3779{
3780	return tracing_disabled;
3781}
3782
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003783/*
3784 * Open and update trace_array ref count.
3785 * Must have the current trace_array passed to it.
3786 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003787static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003788{
3789 struct trace_array *tr = inode->i_private;
3790
3791 if (tracing_disabled)
3792 return -ENODEV;
3793
3794 if (trace_array_get(tr) < 0)
3795 return -ENODEV;
3796
3797 filp->private_data = inode->i_private;
3798
3799 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003800}
3801
Hannes Eder4fd27352009-02-10 19:44:12 +01003802static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003803{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003804 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003805 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003806 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003807 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003808
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003809 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003810 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003811 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003812 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003813
Oleg Nesterov6484c712013-07-23 17:26:10 +02003814 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003815 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003816 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003817
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003818 for_each_tracing_cpu(cpu) {
3819 if (iter->buffer_iter[cpu])
3820 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3821 }
3822
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003823 if (iter->trace && iter->trace->close)
3824 iter->trace->close(iter);
3825
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003826 if (!iter->snapshot)
3827 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003828 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003829
3830 __trace_array_put(tr);
3831
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003832 mutex_unlock(&trace_types_lock);
3833
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003834 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003835 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003836 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003837 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003838 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003839
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003840 return 0;
3841}
3842
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003843static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3844{
3845 struct trace_array *tr = inode->i_private;
3846
3847 trace_array_put(tr);
3848 return 0;
3849}
3850
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003851static int tracing_single_release_tr(struct inode *inode, struct file *file)
3852{
3853 struct trace_array *tr = inode->i_private;
3854
3855 trace_array_put(tr);
3856
3857 return single_release(inode, file);
3858}
3859
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003860static int tracing_open(struct inode *inode, struct file *file)
3861{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003862 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003863 struct trace_iterator *iter;
3864 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003865
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003866 if (trace_array_get(tr) < 0)
3867 return -ENODEV;
3868
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003869 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003870 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3871 int cpu = tracing_get_cpu(inode);
3872
3873 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003874 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003875 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003876 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003877 }
3878
3879 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003880 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003881 if (IS_ERR(iter))
3882 ret = PTR_ERR(iter);
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003883 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003884 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3885 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003886
3887 if (ret < 0)
3888 trace_array_put(tr);
3889
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003890 return ret;
3891}
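
/*
 * Usage note (informational): opening "trace" with O_TRUNC is what
 * clears the buffer, so from a shell either of these discards the
 * current contents (see the mini-HOWTO string below):
 *
 *	# echo > trace
 *	# > trace
 */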

/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}
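
/*
 * Illustrative sketch (assumption, not part of this file): a tracer
 * makes itself visible to instance buffers by setting .allow_instances
 * in its struct tracer; the tracer name here is made up.
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name			= "example",
 *		.allow_instances	= true,
 *	};
 */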

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_puts(m, t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};

/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = snprintf(mask_str, count, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	if (len >= count) {
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};
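
/*
 * Usage note (informational): the mask is written as a hex CPU bitmap,
 * e.g. to restrict tracing to CPUs 0-3:
 *
 *	# echo f > tracing_cpumask
 */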

static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (tr->trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tracer_flags->trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		tr->trace_flags |= mask;
	else
		tr->trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_EVENT_FORK)
		trace_event_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_FUNC_FORK)
		ftrace_pid_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK) {
		trace_printk_start_stop_comm(enabled);
		trace_printk_control(enabled);
	}

	return 0;
}
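
/*
 * Illustrative sketch (assumption, not from this file): a latency
 * tracer that must keep overwrite mode enabled can route its
 * ->flag_changed() callback through trace_keep_overwrite() above;
 * the names below are invented for the example.
 *
 *	static int example_flag_changed(struct trace_array *tr,
 *					u32 mask, int set)
 *	{
 *		return trace_keep_overwrite(&example_tracer, mask, set);
 *	}
 */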

static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;
	size_t orig_len = strlen(option);

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	/*
	 * If the first trailing whitespace is replaced with '\0' by strstrip,
	 * turn it back into a space.
	 */
	if (orig_len > strlen(option))
		option[strlen(option)] = ' ';

	return ret;
}
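
/*
 * Usage note (informational): an option is toggled by writing its name
 * to trace_options, with a "no" prefix to clear it:
 *
 *	# echo print-parent > trace_options
 *	# echo noprint-parent > trace_options
 */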

static void __init apply_trace_boot_options(void)
{
	char *buf = trace_boot_options_buf;
	char *option;

	while (true) {
		option = strsep(&buf, ",");

		if (!option)
			break;

		if (*option)
			trace_set_options(&global_trace, option);

		/* Put back the comma to allow this to be called again */
		if (buf)
			*(buf - 1) = ',';
	}
}
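
/*
 * Informational: trace_boot_options_buf is filled from the
 * "trace_options=" kernel command line parameter, so the loop above
 * handles a boot line such as:
 *
 *	trace_options=stacktrace,sym-offset
 */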

static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};

static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	" trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t To clear the buffer write into this file: echo > trace\n"
	" trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	" current_tracer\t- function and latency tracers\n"
	" available_tracers\t- list of configured tracers for current_tracer\n"
	" buffer_size_kb\t- view and modify size of per cpu buffer\n"
	" buffer_total_size_kb - view total size of all cpu buffers\n\n"
	" trace_clock\t\t- change the clock used to order events\n"
	" local: Per cpu clock but may not be synced across CPUs\n"
	" global: Synced across CPUs but slows tracing down.\n"
	" counter: Not a clock, but just an increment\n"
	" uptime: Jiffy counter from time of boot\n"
	" perf: Same clock that perf events use\n"
#ifdef CONFIG_X86_64
	" x86-tsc: TSC cycle counter\n"
#endif
	"\n trace_marker\t\t- Writes into this file write into the kernel buffer\n"
	"\n trace_marker_raw\t\t- Writes into this file write binary data into the kernel buffer\n"
	" tracing_cpumask\t- Limit which CPUs to trace\n"
	" instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t Remove sub-buffer with rmdir\n"
	" trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t Disable an option by adding a suffix 'no' to the\n"
	"\t\t\t option name\n"
	" saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n available_filter_functions - list of functions that can be filtered on\n"
	" set_ftrace_filter\t- echo function name in here to only trace these\n"
	"\t\t\t functions\n"
	"\t accepts: func_full_name or glob-matching-pattern\n"
	"\t modules: Can select a group via module\n"
	"\t Format: :mod:<module-name>\n"
	"\t example: echo :mod:ext3 > set_ftrace_filter\n"
	"\t triggers: a command to perform when function is hit\n"
	"\t Format: <function>:<trigger>[:count]\n"
	"\t trigger: traceon, traceoff\n"
	"\t\t enable_event:<system>:<event>\n"
	"\t\t disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t snapshot\n"
#endif
	"\t\t dump\n"
	"\t\t cpudump\n"
	"\t example: echo do_fault:traceoff > set_ftrace_filter\n"
	"\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"\t The first one will disable tracing every time do_fault is hit\n"
	"\t The second will disable tracing at most 3 times when do_trap is hit\n"
	"\t The first time do_trap is hit and it disables tracing, the\n"
	"\t counter will decrement to 2. If tracing is already disabled,\n"
	"\t the counter will not decrement. It only decrements when the\n"
	"\t trigger did work\n"
	"\t To remove trigger without count:\n"
	"\t echo '!<function>:<trigger>' > set_ftrace_filter\n"
	"\t To remove trigger with a count:\n"
	"\t echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
	" set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t modules: Can select a group via module command :mod:\n"
	"\t Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	" set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
	"\t\t (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	" set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	" set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
	" max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
	"\t\t\t snapshot buffer. Read the contents for more\n"
	"\t\t\t information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	" stack_trace\t\t- Shows the max stack trace when active\n"
	" stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t Write into this file to reset the max size (trigger a\n"
	"\t\t\t new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	" stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
	"\t\t\t traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
#ifdef CONFIG_KPROBE_EVENTS
	" kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
	"\t\t\t Write into this file to define/undefine new trace events.\n"
#endif
#ifdef CONFIG_UPROBE_EVENTS
	" uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
	"\t\t\t Write into this file to define/undefine new trace events.\n"
#endif
#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
	"\t accepts: event-definitions (one definition per line)\n"
	"\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
	"\t -:[<group>/]<event>\n"
#ifdef CONFIG_KPROBE_EVENTS
	"\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
#endif
#ifdef CONFIG_UPROBE_EVENTS
	"\t place: <path>:<offset>\n"
#endif
	"\t args: <name>=fetcharg[:type]\n"
	"\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
	"\t $stack<index>, $stack, $retval, $comm\n"
	"\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
	"\t b<bit-width>@<bit-offset>/<container-size>\n"
#endif
	" events/\t\t- Directory containing all trace event subsystems:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	" events/<system>/\t- Directory containing all trace events for <system>:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"\t\t\t events\n"
	" filter\t\t- If set, only events passing filter are traced\n"
	" events/<system>/<event>/\t- Directory containing control files for\n"
	"\t\t\t <event>:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	" filter\t\t- If set, only events passing filter are traced\n"
	" trigger\t\t- If set, a command to perform when event is hit\n"
	"\t Format: <trigger>[:count][if <filter>]\n"
	"\t trigger: traceon, traceoff\n"
	"\t enable_event:<system>:<event>\n"
	"\t disable_event:<system>:<event>\n"
#ifdef CONFIG_HIST_TRIGGERS
	"\t enable_hist:<system>:<event>\n"
	"\t disable_hist:<system>:<event>\n"
#endif
#ifdef CONFIG_STACKTRACE
	"\t\t stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t snapshot\n"
#endif
#ifdef CONFIG_HIST_TRIGGERS
	"\t\t hist (see below)\n"
#endif
	"\t example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t events/block/block_unplug/trigger\n"
	"\t The first disables tracing every time block_unplug is hit.\n"
	"\t The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
	"\t Like function triggers, the counter is only decremented if it\n"
	"\t enabled or disabled tracing.\n"
	"\t To remove a trigger without a count:\n"
	"\t echo '!<trigger>' > <system>/<event>/trigger\n"
	"\t To remove a trigger with a count:\n"
	"\t echo '!<trigger>:0' > <system>/<event>/trigger\n"
	"\t Filters can be ignored when removing a trigger.\n"
#ifdef CONFIG_HIST_TRIGGERS
	" hist trigger\t- If set, event hits are aggregated into a hash table\n"
	"\t Format: hist:keys=<field1[,field2,...]>\n"
	"\t [:values=<field1[,field2,...]>]\n"
	"\t [:sort=<field1[,field2,...]>]\n"
	"\t [:size=#entries]\n"
	"\t [:pause][:continue][:clear]\n"
	"\t [:name=histname1]\n"
	"\t [if <filter>]\n\n"
	"\t When a matching event is hit, an entry is added to a hash\n"
	"\t table using the key(s) and value(s) named, and the value of a\n"
	"\t sum called 'hitcount' is incremented. Keys and values\n"
	"\t correspond to fields in the event's format description. Keys\n"
	"\t can be any field, or the special string 'stacktrace'.\n"
	"\t Compound keys consisting of up to two fields can be specified\n"
	"\t by the 'keys' keyword. Values must correspond to numeric\n"
	"\t fields. Sort keys consisting of up to two fields can be\n"
	"\t specified using the 'sort' keyword. The sort direction can\n"
	"\t be modified by appending '.descending' or '.ascending' to a\n"
	"\t sort field. The 'size' parameter can be used to specify more\n"
	"\t or fewer than the default 2048 entries for the hashtable size.\n"
	"\t If a hist trigger is given a name using the 'name' parameter,\n"
	"\t its histogram data will be shared with other triggers of the\n"
	"\t same name, and trigger hits will update this common data.\n\n"
	"\t Reading the 'hist' file for the event will dump the hash\n"
	"\t table in its entirety to stdout. If there are multiple hist\n"
	"\t triggers attached to an event, there will be a table for each\n"
	"\t trigger in the output. The table displayed for a named\n"
	"\t trigger will be the same as any other instance having the\n"
	"\t same name. The default format used to display a given field\n"
	"\t can be modified by appending any of the following modifiers\n"
	"\t to the field name, as applicable:\n\n"
	"\t .hex display a number as a hex value\n"
	"\t .sym display an address as a symbol\n"
	"\t .sym-offset display an address as a symbol and offset\n"
	"\t .execname display a common_pid as a program name\n"
	"\t .syscall display a syscall id as a syscall name\n"
	"\t .log2 display log2 value rather than raw number\n\n"
	"\t The 'pause' parameter can be used to pause an existing hist\n"
	"\t trigger or to start a hist trigger but not log any events\n"
	"\t until told to do so. 'continue' can be used to start or\n"
	"\t restart a paused hist trigger.\n\n"
	"\t The 'clear' parameter will clear the contents of a running\n"
	"\t hist trigger and leave its current paused/active state\n"
	"\t unchanged.\n\n"
	"\t The enable_hist and disable_hist triggers can be used to\n"
	"\t have one event conditionally start and stop another event's\n"
	"\t already-attached hist trigger. The syntax is analogous to\n"
	"\t the enable_event and disable_event triggers.\n"
#endif
;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};

static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}

static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
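
/*
 * Usage note (informational): reading the file dumps the cached
 * pid -> comm pairs, one per line; the output below is illustrative:
 *
 *	# cat saved_cmdlines
 *	1 systemd
 *	57 kworker/3:1
 */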

static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry or less than PID_MAX_DEFAULT */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};
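
/*
 * Usage note (informational): growing the cache lets more tasks keep a
 * resolved comm in the trace output instead of "<...>":
 *
 *	# echo 4096 > saved_cmdlines_size
 *	# cat saved_cmdlines_size
 *	4096
 */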

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
static union trace_enum_map_item *
update_enum_map(union trace_enum_map_item *ptr)
{
	if (!ptr->map.enum_string) {
		if (ptr->tail.next) {
			ptr = ptr->tail.next;
			/* Set ptr to the next real item (skip head) */
			ptr++;
		} else
			return NULL;
	}
	return ptr;
}

static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
{
	union trace_enum_map_item *ptr = v;

	/*
	 * Paranoid! If ptr points to end, we don't want to increment past it.
	 * This really should never happen.
	 */
	ptr = update_enum_map(ptr);
	if (WARN_ON_ONCE(!ptr))
		return NULL;

	ptr++;

	(*pos)++;

	ptr = update_enum_map(ptr);

	return ptr;
}

static void *enum_map_start(struct seq_file *m, loff_t *pos)
{
	union trace_enum_map_item *v;
	loff_t l = 0;

	mutex_lock(&trace_enum_mutex);

	v = trace_enum_maps;
	if (v)
		v++;

	while (v && l < *pos) {
		v = enum_map_next(m, v, &l);
	}

	return v;
}

static void enum_map_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&trace_enum_mutex);
}

static int enum_map_show(struct seq_file *m, void *v)
{
	union trace_enum_map_item *ptr = v;

	seq_printf(m, "%s %ld (%s)\n",
		   ptr->map.enum_string, ptr->map.enum_value,
		   ptr->map.system);

	return 0;
}

static const struct seq_operations tracing_enum_map_seq_ops = {
	.start		= enum_map_start,
	.next		= enum_map_next,
	.stop		= enum_map_stop,
	.show		= enum_map_show,
};

static int tracing_enum_map_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_enum_map_seq_ops);
}

static const struct file_operations tracing_enum_map_fops = {
	.open		= tracing_enum_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static inline union trace_enum_map_item *
trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
{
	/* Return tail of array given the head */
	return ptr + ptr->head.length + 1;
}
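
/*
 * Layout sketch (informational) of one map_array allocation as built
 * by trace_insert_enum_map_file() below, shown for len == 2:
 *
 *	[ head {mod, length} ][ map ][ map ][ tail {next} ]
 *
 * trace_enum_jmp_to_tail() skips the head plus 'length' map entries
 * to land on the tail item, which chains to the next allocation.
 */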

static void
trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
			   int len)
{
	struct trace_enum_map **stop;
	struct trace_enum_map **map;
	union trace_enum_map_item *map_array;
	union trace_enum_map_item *ptr;

	stop = start + len;

	/*
	 * The trace_enum_maps contains the map plus a head and tail item,
	 * where the head holds the module and length of array, and the
	 * tail holds a pointer to the next list.
	 */
	map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
	if (!map_array) {
		pr_warn("Unable to allocate trace enum mapping\n");
		return;
	}

	mutex_lock(&trace_enum_mutex);

	if (!trace_enum_maps)
		trace_enum_maps = map_array;
	else {
		ptr = trace_enum_maps;
		for (;;) {
			ptr = trace_enum_jmp_to_tail(ptr);
			if (!ptr->tail.next)
				break;
			ptr = ptr->tail.next;
		}
		ptr->tail.next = map_array;
	}
	map_array->head.mod = mod;
	map_array->head.length = len;
	map_array++;

	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
		map_array->map = **map;
		map_array++;
	}
	memset(map_array, 0, sizeof(*map_array));

	mutex_unlock(&trace_enum_mutex);
}

static void trace_create_enum_file(struct dentry *d_tracer)
{
	trace_create_file("enum_map", 0444, d_tracer,
			  NULL, &tracing_enum_map_fops);
}

#else /* CONFIG_TRACE_ENUM_MAP_FILE */
static inline void trace_create_enum_file(struct dentry *d_tracer) { }
static inline void trace_insert_enum_map_file(struct module *mod,
			      struct trace_enum_map **start, int len) { }
#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */

static void trace_insert_enum_map(struct module *mod,
				  struct trace_enum_map **start, int len)
{
	struct trace_enum_map **map;

	if (len <= 0)
		return;

	map = start;

	trace_event_enum_update(map, len);

	trace_insert_enum_map_file(mod, start, len);
}
4893
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004894static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004895tracing_set_trace_read(struct file *filp, char __user *ubuf,
4896 size_t cnt, loff_t *ppos)
4897{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004898 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004899 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004900 int r;
4901
4902 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004903 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004904 mutex_unlock(&trace_types_lock);
4905
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004906 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004907}
4908
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004909int tracer_init(struct tracer *t, struct trace_array *tr)
4910{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004911 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004912 return t->init(tr);
4913}
4914
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004915static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004916{
4917 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05004918
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004919 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004920 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004921}
4922
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004923#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09004924/* resize @tr's buffer to the size of @size_tr's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004925static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4926 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09004927{
4928 int cpu, ret = 0;
4929
4930 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4931 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004932 ret = ring_buffer_resize(trace_buf->buffer,
4933 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004934 if (ret < 0)
4935 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004936 per_cpu_ptr(trace_buf->data, cpu)->entries =
4937 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09004938 }
4939 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004940 ret = ring_buffer_resize(trace_buf->buffer,
4941 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004942 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004943 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4944 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09004945 }
4946
4947 return ret;
4948}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004949#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09004950
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004951static int __tracing_resize_ring_buffer(struct trace_array *tr,
4952 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04004953{
4954 int ret;
4955
4956 /*
4957 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04004958 * we use the size that was given, and we can forget about
4959 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04004960 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05004961 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04004962
Steven Rostedtb382ede62012-10-10 21:44:34 -04004963 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004964 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04004965 return 0;
4966
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004967 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004968 if (ret < 0)
4969 return ret;
4970
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004971#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004972 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4973 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004974 goto out;
4975
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004976 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004977 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004978 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4979 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004980 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04004981 /*
4982 * AARGH! We are left with different
4983 * size max buffer!!!!
4984 * The max buffer is our "snapshot" buffer.
4985 * When a tracer needs a snapshot (one of the
4986 * latency tracers), it swaps the max buffer
4987 * with the saved snap shot. We succeeded to
4988 * update the size of the main buffer, but failed to
4989 * update the size of the max buffer. But when we tried
4990 * to reset the main buffer to the original size, we
4991 * failed there too. This is very unlikely to
4992 * happen, but if it does, warn and kill all
4993 * tracing.
4994 */
Steven Rostedt73c51622009-03-11 13:42:01 -04004995 WARN_ON(1);
4996 tracing_disabled = 1;
4997 }
4998 return ret;
4999 }
5000
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005001 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005002 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005003 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005004 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005005
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005006 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005007#endif /* CONFIG_TRACER_MAX_TRACE */
5008
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005009 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005010 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005011 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005012 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04005013
5014 return ret;
5015}
5016
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005017static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5018 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005019{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07005020 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005021
5022 mutex_lock(&trace_types_lock);
5023
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005024 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5025 /* make sure this cpu is enabled in the mask */
5026 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5027 ret = -EINVAL;
5028 goto out;
5029 }
5030 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005031
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005032 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005033 if (ret < 0)
5034 ret = -ENOMEM;
5035
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005036out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005037 mutex_unlock(&trace_types_lock);
5038
5039 return ret;
5040}
5041
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005042
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005043/**
5044 * tracing_update_buffers - used by tracing facility to expand ring buffers
5045 *
5046 * To save memory when tracing is never used on a system that has it
5047 * configured in, the ring buffers are set to a minimum size. But once
5048 * a user starts to use the tracing facility, they need to grow
5049 * to their default size.
5050 *
5051 * This function is to be called when a tracer is about to be used.
5052 */
5053int tracing_update_buffers(void)
5054{
5055 int ret = 0;
5056
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005057 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005058 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005059 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005060 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005061 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005062
5063 return ret;
5064}
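/*
 * Minimal usage sketch (illustrative only, not kernel code): any path
 * that is about to start tracing should expand the buffers first:
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *
 * On success the ring buffers are at their default (expanded) size.
 */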
5065
Steven Rostedt577b7852009-02-26 23:43:05 -05005066struct trace_option_dentry;
5067
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04005068static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005069create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05005070
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05005071/*
5072 * Used to clear out the tracer before deletion of an instance.
5073 * Must have trace_types_lock held.
5074 */
5075static void tracing_set_nop(struct trace_array *tr)
5076{
5077 if (tr->current_trace == &nop_trace)
5078 return;
5079
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005080 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05005081
5082 if (tr->current_trace->reset)
5083 tr->current_trace->reset(tr);
5084
5085 tr->current_trace = &nop_trace;
5086}
5087
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04005088static void add_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005089{
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005090 /* Only enable if the directory has been created already. */
5091 if (!tr->dir)
5092 return;
5093
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04005094 create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005095}
5096
5097static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5098{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005099 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005100#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005101 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005102#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005103 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005104
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005105 mutex_lock(&trace_types_lock);
5106
Steven Rostedt73c51622009-03-11 13:42:01 -04005107 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005108 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005109 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04005110 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01005111 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04005112 ret = 0;
5113 }
5114
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005115 for (t = trace_types; t; t = t->next) {
5116 if (strcmp(t->name, buf) == 0)
5117 break;
5118 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005119 if (!t) {
5120 ret = -EINVAL;
5121 goto out;
5122 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005123 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005124 goto out;
5125
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005126 /* Some tracers are only allowed for the top level buffer */
5127 if (!trace_ok_for_array(t, tr)) {
5128 ret = -EINVAL;
5129 goto out;
5130 }
5131
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005132 /* If trace pipe files are being read, we can't change the tracer */
5133 if (tr->current_trace->ref) {
5134 ret = -EBUSY;
5135 goto out;
5136 }
5137
Steven Rostedt9f029e82008-11-12 15:24:24 -05005138 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005139
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005140 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005141
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005142 if (tr->current_trace->reset)
5143 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05005144
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005145 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005146 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05005147
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005148#ifdef CONFIG_TRACER_MAX_TRACE
5149 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05005150
5151 if (had_max_tr && !t->use_max_tr) {
5152 /*
5153 * We need to make sure that the update_max_tr sees that
5154 * current_trace changed to nop_trace to keep it from
5155 * swapping the buffers after we resize it.
5156 * update_max_tr() is called with interrupts disabled,
5157 * so a synchronize_sched() is sufficient.
5158 */
5159 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005160 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005161 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005162#endif
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005163
5164#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005165 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005166 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005167 if (ret < 0)
5168 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005169 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005170#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05005171
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005172 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005173 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005174 if (ret)
5175 goto out;
5176 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005177
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005178 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005179 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05005180 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005181 out:
5182 mutex_unlock(&trace_types_lock);
5183
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005184 return ret;
5185}
5186
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005187static ssize_t
5188tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5189 size_t cnt, loff_t *ppos)
5190{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005191 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08005192 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005193 int i;
5194 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005195 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005196
Steven Rostedt60063a62008-10-28 10:44:24 -04005197 ret = cnt;
5198
Li Zefanee6c2c12009-09-18 14:06:47 +08005199 if (cnt > MAX_TRACER_SIZE)
5200 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005201
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08005202 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005203 return -EFAULT;
5204
5205 buf[cnt] = 0;
5206
5207 /* strip trailing whitespace. */
5208 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5209 buf[i] = 0;
5210
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005211 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005212 if (err)
5213 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005214
Jiri Olsacf8517c2009-10-23 19:36:16 -04005215 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005216
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005217 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005218}
5219
5220static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005221tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5222 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005223{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005224 char buf[64];
5225 int r;
5226
Steven Rostedtcffae432008-05-12 21:21:00 +02005227 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005228 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02005229 if (r > sizeof(buf))
5230 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005231 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005232}
5233
5234static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005235tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5236 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005237{
Hannes Eder5e398412009-02-10 19:44:34 +01005238 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005239 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005240
Peter Huewe22fe9b52011-06-07 21:58:27 +02005241 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5242 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005243 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005244
5245 *ptr = val * 1000;
5246
5247 return cnt;
5248}
5249
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005250static ssize_t
5251tracing_thresh_read(struct file *filp, char __user *ubuf,
5252 size_t cnt, loff_t *ppos)
5253{
5254 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5255}
5256
5257static ssize_t
5258tracing_thresh_write(struct file *filp, const char __user *ubuf,
5259 size_t cnt, loff_t *ppos)
5260{
5261 struct trace_array *tr = filp->private_data;
5262 int ret;
5263
5264 mutex_lock(&trace_types_lock);
5265 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5266 if (ret < 0)
5267 goto out;
5268
5269 if (tr->current_trace->update_thresh) {
5270 ret = tr->current_trace->update_thresh(tr);
5271 if (ret < 0)
5272 goto out;
5273 }
5274
5275 ret = cnt;
5276out:
5277 mutex_unlock(&trace_types_lock);
5278
5279 return ret;
5280}
5281
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04005282#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Chen Gange428abb2015-11-10 05:15:15 +08005283
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005284static ssize_t
5285tracing_max_lat_read(struct file *filp, char __user *ubuf,
5286 size_t cnt, loff_t *ppos)
5287{
5288 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5289}
5290
5291static ssize_t
5292tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5293 size_t cnt, loff_t *ppos)
5294{
5295 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5296}
5297
Chen Gange428abb2015-11-10 05:15:15 +08005298#endif
5299
Steven Rostedtb3806b42008-05-12 21:20:46 +02005300static int tracing_open_pipe(struct inode *inode, struct file *filp)
5301{
Oleg Nesterov15544202013-07-23 17:25:57 +02005302 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005303 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005304 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005305
5306 if (tracing_disabled)
5307 return -ENODEV;
5308
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005309 if (trace_array_get(tr) < 0)
5310 return -ENODEV;
5311
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005312 mutex_lock(&trace_types_lock);
5313
Steven Rostedtb3806b42008-05-12 21:20:46 +02005314 /* create a buffer to store the information to pass to userspace */
5315 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005316 if (!iter) {
5317 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005318 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005319 goto out;
5320 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005321
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04005322 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005323 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005324
5325 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5326 ret = -ENOMEM;
5327 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10305328 }
5329
Steven Rostedta3097202008-11-07 22:36:02 -05005330 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10305331 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05005332
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005333 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04005334 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5335
David Sharp8be07092012-11-13 12:18:22 -08005336 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005337 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08005338 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5339
Oleg Nesterov15544202013-07-23 17:25:57 +02005340 iter->tr = tr;
5341 iter->trace_buffer = &tr->trace_buffer;
5342 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005343 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005344 filp->private_data = iter;
5345
Steven Rostedt107bad82008-05-12 21:21:01 +02005346 if (iter->trace->pipe_open)
5347 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02005348
Arnd Bergmannb4447862010-07-07 23:40:11 +02005349 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005350
5351 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005352out:
5353 mutex_unlock(&trace_types_lock);
5354 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005355
5356fail:
5357 kfree(iter->trace);
5358 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005359 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005360 mutex_unlock(&trace_types_lock);
5361 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005362}
5363
5364static int tracing_release_pipe(struct inode *inode, struct file *file)
5365{
5366 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02005367 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005368
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005369 mutex_lock(&trace_types_lock);
5370
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005371 tr->current_trace->ref--;
5372
Steven Rostedt29bf4a52009-12-09 12:37:43 -05005373 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05005374 iter->trace->pipe_close(iter);
5375
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005376 mutex_unlock(&trace_types_lock);
5377
Rusty Russell44623442009-01-01 10:12:23 +10305378 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005379 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005380 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005381
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005382 trace_array_put(tr);
5383
Steven Rostedtb3806b42008-05-12 21:20:46 +02005384 return 0;
5385}
5386
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005387static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005388trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005389{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005390 struct trace_array *tr = iter->tr;
5391
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005392 /* Iterators are static; they should be either filled or empty */
5393 if (trace_buffer_iter(iter, iter->cpu_file))
5394 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005395
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005396 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005397 /*
5398 * Always select as readable when in blocking mode
5399 */
5400 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005401 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005402 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005403 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005404}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005405
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005406static unsigned int
5407tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5408{
5409 struct trace_iterator *iter = filp->private_data;
5410
5411 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005412}
5413
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005414/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005415static int tracing_wait_pipe(struct file *filp)
5416{
5417 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005418 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005419
5420 while (trace_empty(iter)) {
5421
5422 if ((filp->f_flags & O_NONBLOCK)) {
5423 return -EAGAIN;
5424 }
5425
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005426 /*
Liu Bo250bfd32013-01-14 10:54:11 +08005427 * We stop blocking only after we have read something with
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005428 * tracing disabled. We still block while tracing is disabled
5429 * if we have never read anything: this allows a user to cat
5430 * this file, and then enable tracing. But after we have read
5431 * something, we give an EOF when tracing is disabled again.
5432 *
5433 * iter->pos will be 0 if we haven't read anything.
5434 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04005435 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005436 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04005437
5438 mutex_unlock(&iter->mutex);
5439
Rabin Vincente30f53a2014-11-10 19:46:34 +01005440 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04005441
5442 mutex_lock(&iter->mutex);
5443
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005444 if (ret)
5445 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005446 }
5447
5448 return 1;
5449}
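/*
 * Userspace view of the above (illustrative sketch only): a reader
 * that opens trace_pipe with O_NONBLOCK gets -EAGAIN instead of
 * blocking when the buffer is empty:
 *
 *	char buf[4096];
 *	int fd = open("/sys/kernel/debug/tracing/trace_pipe",
 *		      O_RDONLY | O_NONBLOCK);
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	if (n < 0 && errno == EAGAIN)
 *		... no trace data yet, poll or retry later
 */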
5450
Steven Rostedtb3806b42008-05-12 21:20:46 +02005451/*
5452 * Consumer reader.
5453 */
5454static ssize_t
5455tracing_read_pipe(struct file *filp, char __user *ubuf,
5456 size_t cnt, loff_t *ppos)
5457{
5458 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005459 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005460
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005461 /*
5462 * Avoid more than one consumer on a single file descriptor.
5463 * This is just a matter of trace coherency; the ring buffer itself
5464 * is protected.
5465 */
5466 mutex_lock(&iter->mutex);
Steven Rostedt (Red Hat)12458002016-09-23 22:57:13 -04005467
5468 /* return any leftover data */
5469 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5470 if (sret != -EBUSY)
5471 goto out;
5472
5473 trace_seq_init(&iter->seq);
5474
Steven Rostedt107bad82008-05-12 21:21:01 +02005475 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005476 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5477 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02005478 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02005479 }
5480
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005481waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005482 sret = tracing_wait_pipe(filp);
5483 if (sret <= 0)
5484 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005485
5486 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005487 if (trace_empty(iter)) {
5488 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02005489 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005490 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005491
5492 if (cnt >= PAGE_SIZE)
5493 cnt = PAGE_SIZE - 1;
5494
Steven Rostedt53d0aa72008-05-12 21:21:01 +02005495 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02005496 memset(&iter->seq, 0,
5497 sizeof(struct trace_iterator) -
5498 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04005499 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02005500 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005501
Lai Jiangshan4f535962009-05-18 19:35:34 +08005502 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005503 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05005504 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02005505 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005506 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02005507
Ingo Molnarf9896bf2008-05-12 21:20:47 +02005508 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02005509 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02005510 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005511 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005512 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02005513 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01005514 if (ret != TRACE_TYPE_NO_CONSUME)
5515 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005516
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005517 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02005518 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01005519
5520 /*
5521 * Setting the full flag means we reached the trace_seq buffer
5522 * size and should have left via the partial-output condition
5523 * above. One of the trace_seq_* functions is not being used properly.
5524 */
5525 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5526 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005527 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005528 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005529 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02005530
Steven Rostedtb3806b42008-05-12 21:20:46 +02005531 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005532 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005533 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05005534 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005535
5536 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005537 * If there was nothing to send to the user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005538 * entries, go back and wait for more entries.
5539 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005540 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005541 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005542
Steven Rostedt107bad82008-05-12 21:21:01 +02005543out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005544 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02005545
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005546 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005547}
5548
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005549static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5550 unsigned int idx)
5551{
5552 __free_page(spd->pages[idx]);
5553}
5554
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005555static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05005556 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005557 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05005558 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005559 .steal = generic_pipe_buf_steal,
5560 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005561};
5562
Steven Rostedt34cd4992009-02-09 12:06:29 -05005563static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01005564tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005565{
5566 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005567 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005568 int ret;
5569
5570 /* Seq buffer is page-sized, exactly what we need. */
5571 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005572 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005573 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005574
5575 if (trace_seq_has_overflowed(&iter->seq)) {
5576 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005577 break;
5578 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005579
5580 /*
5581 * This should not be hit, because it should only
5582 * be set if the iter->seq overflowed. But check it
5583 * anyway to be safe.
5584 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05005585 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005586 iter->seq.seq.len = save_len;
5587 break;
5588 }
5589
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005590 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005591 if (rem < count) {
5592 rem = 0;
5593 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005594 break;
5595 }
5596
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08005597 if (ret != TRACE_TYPE_NO_CONSUME)
5598 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05005599 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05005600 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05005601 rem = 0;
5602 iter->ent = NULL;
5603 break;
5604 }
5605 }
5606
5607 return rem;
5608}
5609
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005610static ssize_t tracing_splice_read_pipe(struct file *filp,
5611 loff_t *ppos,
5612 struct pipe_inode_info *pipe,
5613 size_t len,
5614 unsigned int flags)
5615{
Jens Axboe35f3d142010-05-20 10:43:18 +02005616 struct page *pages_def[PIPE_DEF_BUFFERS];
5617 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005618 struct trace_iterator *iter = filp->private_data;
5619 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005620 .pages = pages_def,
5621 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005622 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02005623 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005624 .flags = flags,
5625 .ops = &tracing_pipe_buf_ops,
5626 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005627 };
5628 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005629 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005630 unsigned int i;
5631
Jens Axboe35f3d142010-05-20 10:43:18 +02005632 if (splice_grow_spd(pipe, &spd))
5633 return -ENOMEM;
5634
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005635 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005636
5637 if (iter->trace->splice_read) {
5638 ret = iter->trace->splice_read(iter, filp,
5639 ppos, pipe, len, flags);
5640 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005641 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005642 }
5643
5644 ret = tracing_wait_pipe(filp);
5645 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005646 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005647
Jason Wessel955b61e2010-08-05 09:22:23 -05005648 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005649 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005650 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005651 }
5652
Lai Jiangshan4f535962009-05-18 19:35:34 +08005653 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005654 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005655
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005656 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04005657 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005658 spd.pages[i] = alloc_page(GFP_KERNEL);
5659 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05005660 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005661
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01005662 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005663
5664 /* Copy the data into the page, so we can start over. */
5665 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02005666 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005667 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005668 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005669 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005670 break;
5671 }
Jens Axboe35f3d142010-05-20 10:43:18 +02005672 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005673 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005674
Steven Rostedtf9520752009-03-02 14:04:40 -05005675 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005676 }
5677
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005678 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005679 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005680 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005681
5682 spd.nr_pages = i;
5683
Steven Rostedt (Red Hat)a29054d92016-03-18 15:46:48 -04005684 if (i)
5685 ret = splice_to_pipe(pipe, &spd);
5686 else
5687 ret = 0;
Jens Axboe35f3d142010-05-20 10:43:18 +02005688out:
Eric Dumazet047fe362012-06-12 15:24:40 +02005689 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02005690 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005691
Steven Rostedt34cd4992009-02-09 12:06:29 -05005692out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005693 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02005694 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005695}
5696
Steven Rostedta98a3c32008-05-12 21:20:59 +02005697static ssize_t
5698tracing_entries_read(struct file *filp, char __user *ubuf,
5699 size_t cnt, loff_t *ppos)
5700{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005701 struct inode *inode = file_inode(filp);
5702 struct trace_array *tr = inode->i_private;
5703 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005704 char buf[64];
5705 int r = 0;
5706 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005707
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005708 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005709
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005710 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005711 int cpu, buf_size_same;
5712 unsigned long size;
5713
5714 size = 0;
5715 buf_size_same = 1;
5716 /* check if all cpu sizes are the same */
5717 for_each_tracing_cpu(cpu) {
5718 /* fill in the size from the first enabled cpu */
5719 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005720 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5721 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005722 buf_size_same = 0;
5723 break;
5724 }
5725 }
5726
5727 if (buf_size_same) {
5728 if (!ring_buffer_expanded)
5729 r = sprintf(buf, "%lu (expanded: %lu)\n",
5730 size >> 10,
5731 trace_buf_size >> 10);
5732 else
5733 r = sprintf(buf, "%lu\n", size >> 10);
5734 } else
5735 r = sprintf(buf, "X\n");
5736 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005737 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005738
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005739 mutex_unlock(&trace_types_lock);
5740
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005741 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5742 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005743}
5744
5745static ssize_t
5746tracing_entries_write(struct file *filp, const char __user *ubuf,
5747 size_t cnt, loff_t *ppos)
5748{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005749 struct inode *inode = file_inode(filp);
5750 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005751 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005752 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005753
Peter Huewe22fe9b52011-06-07 21:58:27 +02005754 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5755 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005756 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005757
5758 /* must have at least 1 entry */
5759 if (!val)
5760 return -EINVAL;
5761
Steven Rostedt1696b2b2008-11-13 00:09:35 -05005762 /* value is in KB */
5763 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005764 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005765 if (ret < 0)
5766 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005767
Jiri Olsacf8517c2009-10-23 19:36:16 -04005768 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005769
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005770 return cnt;
5771}
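/*
 * Usage sketch (illustrative only): the value written is in kilobytes,
 * hence the "val <<= 10" above, so writing "1024" resizes each per-cpu
 * ring buffer to 1 MB. The per_cpu/cpuN/buffer_size_kb files resize a
 * single CPU's buffer instead:
 *
 *	int fd = open("/sys/kernel/debug/tracing/buffer_size_kb", O_WRONLY);
 *	write(fd, "1024", 4);
 *	close(fd);
 */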
Steven Rostedtbf5e6512008-11-10 21:46:00 -05005772
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005773static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005774tracing_total_entries_read(struct file *filp, char __user *ubuf,
5775 size_t cnt, loff_t *ppos)
5776{
5777 struct trace_array *tr = filp->private_data;
5778 char buf[64];
5779 int r, cpu;
5780 unsigned long size = 0, expanded_size = 0;
5781
5782 mutex_lock(&trace_types_lock);
5783 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005784 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005785 if (!ring_buffer_expanded)
5786 expanded_size += trace_buf_size >> 10;
5787 }
5788 if (ring_buffer_expanded)
5789 r = sprintf(buf, "%lu\n", size);
5790 else
5791 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5792 mutex_unlock(&trace_types_lock);
5793
5794 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5795}
5796
5797static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005798tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5799 size_t cnt, loff_t *ppos)
5800{
5801 /*
5802 * There is no need to read what the user has written; this function
5803 * exists just to make sure that "echo" does not report an error.
5804 */
5805
5806 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005807
5808 return cnt;
5809}
5810
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005811static int
5812tracing_free_buffer_release(struct inode *inode, struct file *filp)
5813{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005814 struct trace_array *tr = inode->i_private;
5815
Steven Rostedtcf30cf62011-06-14 22:44:07 -04005816 /* disable tracing? */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005817 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07005818 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005819 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005820 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005821
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005822 trace_array_put(tr);
5823
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005824 return 0;
5825}
5826
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005827static ssize_t
5828tracing_mark_write(struct file *filp, const char __user *ubuf,
5829 size_t cnt, loff_t *fpos)
5830{
Alexander Z Lam2d716192013-07-01 15:31:24 -07005831 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04005832 struct ring_buffer_event *event;
5833 struct ring_buffer *buffer;
5834 struct print_entry *entry;
5835 unsigned long irq_flags;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05005836 const char faulted[] = "<faulted>";
Steven Rostedtd696b582011-09-22 11:50:27 -04005837 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04005838 int size;
5839 int len;
Steven Rostedtfa32e852016-07-06 15:25:08 -04005840
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05005841/* Used in tracing_mark_raw_write() as well */
5842#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005843
Steven Rostedtc76f0692008-11-07 22:36:02 -05005844 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005845 return -EINVAL;
5846
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005847 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07005848 return -EINVAL;
5849
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005850 if (cnt > TRACE_BUF_SIZE)
5851 cnt = TRACE_BUF_SIZE;
5852
Steven Rostedtd696b582011-09-22 11:50:27 -04005853 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005854
Steven Rostedtd696b582011-09-22 11:50:27 -04005855 local_save_flags(irq_flags);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05005856 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
5857
5858 /* If the input is shorter than "<faulted>", make sure we can still store that */
5859 if (cnt < FAULTED_SIZE)
5860 size += FAULTED_SIZE - cnt;
5861
Alexander Z Lam2d716192013-07-01 15:31:24 -07005862 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05005863 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5864 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05005865 if (unlikely(!event))
Steven Rostedtd696b582011-09-22 11:50:27 -04005866 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05005867 return -EBADF;
Steven Rostedtd696b582011-09-22 11:50:27 -04005868
5869 entry = ring_buffer_event_data(event);
5870 entry->ip = _THIS_IP_;
5871
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05005872 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
5873 if (len) {
5874 memcpy(&entry->buf, faulted, FAULTED_SIZE);
5875 cnt = FAULTED_SIZE;
5876 written = -EFAULT;
Steven Rostedtd696b582011-09-22 11:50:27 -04005877 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05005878 written = cnt;
5879 len = cnt;
Steven Rostedtd696b582011-09-22 11:50:27 -04005880
5881 if (entry->buf[cnt - 1] != '\n') {
5882 entry->buf[cnt] = '\n';
5883 entry->buf[cnt + 1] = '\0';
5884 } else
5885 entry->buf[cnt] = '\0';
5886
Steven Rostedt7ffbd482012-10-11 12:14:25 -04005887 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04005888
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05005889 if (written > 0)
5890 *fpos += written;
Steven Rostedtd696b582011-09-22 11:50:27 -04005891
Steven Rostedtfa32e852016-07-06 15:25:08 -04005892 return written;
5893}
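/*
 * Userspace usage sketch (illustrative only): anything written to the
 * trace_marker file is recorded as a TRACE_PRINT event, which makes it
 * handy for correlating application activity with the kernel trace:
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
 *	write(fd, "frame start", 11);
 *	close(fd);
 */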
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005894
Steven Rostedtfa32e852016-07-06 15:25:08 -04005895/* Limit it for now to 3K (including tag) */
5896#define RAW_DATA_MAX_SIZE (1024*3)
5897
5898static ssize_t
5899tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
5900 size_t cnt, loff_t *fpos)
5901{
5902 struct trace_array *tr = filp->private_data;
5903 struct ring_buffer_event *event;
5904 struct ring_buffer *buffer;
5905 struct raw_data_entry *entry;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05005906 const char faulted[] = "<faulted>";
Steven Rostedtfa32e852016-07-06 15:25:08 -04005907 unsigned long irq_flags;
Steven Rostedtfa32e852016-07-06 15:25:08 -04005908 ssize_t written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04005909 int size;
5910 int len;
5911
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05005912#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
5913
Steven Rostedtfa32e852016-07-06 15:25:08 -04005914 if (tracing_disabled)
5915 return -EINVAL;
5916
5917 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5918 return -EINVAL;
5919
5920 /* The marker must at least have a tag id */
5921 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
5922 return -EINVAL;
5923
5924 if (cnt > TRACE_BUF_SIZE)
5925 cnt = TRACE_BUF_SIZE;
5926
5927 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5928
Steven Rostedtfa32e852016-07-06 15:25:08 -04005929 local_save_flags(irq_flags);
5930 size = sizeof(*entry) + cnt;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05005931 if (cnt < FAULT_SIZE_ID)
5932 size += FAULT_SIZE_ID - cnt;
5933
Steven Rostedtfa32e852016-07-06 15:25:08 -04005934 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05005935 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
5936 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05005937 if (!event)
Steven Rostedtfa32e852016-07-06 15:25:08 -04005938 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05005939 return -EBADF;
Steven Rostedtfa32e852016-07-06 15:25:08 -04005940
5941 entry = ring_buffer_event_data(event);
5942
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05005943 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
5944 if (len) {
5945 entry->id = -1;
5946 memcpy(&entry->buf, faulted, FAULTED_SIZE);
5947 written = -EFAULT;
Steven Rostedtfa32e852016-07-06 15:25:08 -04005948 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05005949 written = cnt;
Steven Rostedtfa32e852016-07-06 15:25:08 -04005950
5951 __buffer_unlock_commit(buffer, event);
5952
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05005953 if (written > 0)
5954 *fpos += written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04005955
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02005956 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005957}
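/*
 * Userspace sketch for the raw variant (illustrative only): the payload
 * is binary and must begin with a tag id (an int), per the size check
 * above; the tag value 42 here is arbitrary:
 *
 *	struct { int id; char data[8]; } raw = { 42, "payload" };
 *	int fd = open("/sys/kernel/debug/tracing/trace_marker_raw",
 *		      O_WRONLY);
 *	write(fd, &raw, sizeof(raw));
 *	close(fd);
 */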
5958
Li Zefan13f16d22009-12-08 11:16:11 +08005959static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08005960{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005961 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08005962 int i;
5963
5964 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08005965 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08005966 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005967 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5968 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08005969 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08005970
Li Zefan13f16d22009-12-08 11:16:11 +08005971 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08005972}
5973
Steven Rostedte1e232c2014-02-10 23:38:46 -05005974static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08005975{
Zhaolei5079f322009-08-25 16:12:56 +08005976 int i;
5977
Zhaolei5079f322009-08-25 16:12:56 +08005978 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5979 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5980 break;
5981 }
5982 if (i == ARRAY_SIZE(trace_clocks))
5983 return -EINVAL;
5984
Zhaolei5079f322009-08-25 16:12:56 +08005985 mutex_lock(&trace_types_lock);
5986
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005987 tr->clock_id = i;
5988
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005989 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08005990
David Sharp60303ed2012-10-11 16:27:52 -07005991 /*
5992 * The new clock may not be consistent with the previous clock.
5993 * Reset the buffers so that they don't contain incomparable timestamps.
5994 */
Alexander Z Lam94571582013-08-02 18:36:16 -07005995 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005996
5997#ifdef CONFIG_TRACER_MAX_TRACE
5998 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5999 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07006000 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006001#endif
David Sharp60303ed2012-10-11 16:27:52 -07006002
Zhaolei5079f322009-08-25 16:12:56 +08006003 mutex_unlock(&trace_types_lock);
6004
Steven Rostedte1e232c2014-02-10 23:38:46 -05006005 return 0;
6006}
6007
6008static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6009 size_t cnt, loff_t *fpos)
6010{
6011 struct seq_file *m = filp->private_data;
6012 struct trace_array *tr = m->private;
6013 char buf[64];
6014 const char *clockstr;
6015 int ret;
6016
6017 if (cnt >= sizeof(buf))
6018 return -EINVAL;
6019
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08006020 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05006021 return -EFAULT;
6022
6023 buf[cnt] = 0;
6024
6025 clockstr = strstrip(buf);
6026
6027 ret = tracing_set_clock(tr, clockstr);
6028 if (ret)
6029 return ret;
6030
Zhaolei5079f322009-08-25 16:12:56 +08006031 *fpos += cnt;
6032
6033 return cnt;
6034}
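/*
 * Usage sketch (illustrative only): reading trace_clock lists the
 * available clocks with the current one in brackets; writing a clock
 * name selects it and resets the buffers, as done above:
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_clock", O_WRONLY);
 *	write(fd, "global", 6);
 *	close(fd);
 */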
6035
Li Zefan13f16d22009-12-08 11:16:11 +08006036static int tracing_clock_open(struct inode *inode, struct file *file)
6037{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006038 struct trace_array *tr = inode->i_private;
6039 int ret;
6040
Li Zefan13f16d22009-12-08 11:16:11 +08006041 if (tracing_disabled)
6042 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006043
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006044 if (trace_array_get(tr))
6045 return -ENODEV;
6046
6047 ret = single_open(file, tracing_clock_show, inode->i_private);
6048 if (ret < 0)
6049 trace_array_put(tr);
6050
6051 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08006052}
6053
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006054struct ftrace_buffer_info {
6055 struct trace_iterator iter;
6056 void *spare;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006057 unsigned int spare_cpu;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006058 unsigned int read;
6059};
6060
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006061#ifdef CONFIG_TRACER_SNAPSHOT
6062static int tracing_snapshot_open(struct inode *inode, struct file *file)
6063{
Oleg Nesterov6484c712013-07-23 17:26:10 +02006064 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006065 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006066 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006067 int ret = 0;
6068
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006069 if (trace_array_get(tr) < 0)
6070 return -ENODEV;
6071
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006072 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02006073 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006074 if (IS_ERR(iter))
6075 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006076 } else {
6077 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006078 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006079 m = kzalloc(sizeof(*m), GFP_KERNEL);
6080 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006081 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006082 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6083 if (!iter) {
6084 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006085 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006086 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006087 ret = 0;
6088
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006089 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02006090 iter->trace_buffer = &tr->max_buffer;
6091 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006092 m->private = iter;
6093 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006094 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006095out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006096 if (ret < 0)
6097 trace_array_put(tr);
6098
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006099 return ret;
6100}
6101
6102static ssize_t
6103tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6104 loff_t *ppos)
6105{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006106 struct seq_file *m = filp->private_data;
6107 struct trace_iterator *iter = m->private;
6108 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006109 unsigned long val;
6110 int ret;
6111
6112 ret = tracing_update_buffers();
6113 if (ret < 0)
6114 return ret;
6115
6116 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6117 if (ret)
6118 return ret;
6119
6120 mutex_lock(&trace_types_lock);
6121
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006122 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006123 ret = -EBUSY;
6124 goto out;
6125 }
6126
6127 switch (val) {
6128 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006129 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6130 ret = -EINVAL;
6131 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006132 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04006133 if (tr->allocated_snapshot)
6134 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006135 break;
6136 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006137/* Only allow per-cpu swap if the ring buffer supports it */
6138#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6139 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6140 ret = -EINVAL;
6141 break;
6142 }
6143#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05006144 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04006145 ret = alloc_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006146 if (ret < 0)
6147 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006148 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006149 local_irq_disable();
6150 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006151 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006152 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006153 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006154 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006155 local_irq_enable();
6156 break;
6157 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05006158 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006159 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6160 tracing_reset_online_cpus(&tr->max_buffer);
6161 else
6162 tracing_reset(&tr->max_buffer, iter->cpu_file);
6163 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006164 break;
6165 }
6166
6167 if (ret >= 0) {
6168 *ppos += cnt;
6169 ret = cnt;
6170 }
6171out:
6172 mutex_unlock(&trace_types_lock);
6173 return ret;
6174}
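/*
 * Usage sketch (illustrative only): per the switch above, writing "0"
 * frees the snapshot buffer, "1" allocates it if needed and takes a
 * snapshot, and any other value clears the snapshot's contents:
 *
 *	int fd = open("/sys/kernel/debug/tracing/snapshot", O_WRONLY);
 *	write(fd, "1", 1);
 *	close(fd);
 */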
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006175
6176static int tracing_snapshot_release(struct inode *inode, struct file *file)
6177{
6178 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006179 int ret;
6180
6181 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006182
6183 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006184 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006185
6186 /* If write only, the seq_file is just a stub */
6187 if (m)
6188 kfree(m->private);
6189 kfree(m);
6190
6191 return 0;
6192}
6193
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006194static int tracing_buffers_open(struct inode *inode, struct file *filp);
6195static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6196 size_t count, loff_t *ppos);
6197static int tracing_buffers_release(struct inode *inode, struct file *file);
6198static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6199 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6200
6201static int snapshot_raw_open(struct inode *inode, struct file *filp)
6202{
6203 struct ftrace_buffer_info *info;
6204 int ret;
6205
6206 ret = tracing_buffers_open(inode, filp);
6207 if (ret < 0)
6208 return ret;
6209
6210 info = filp->private_data;
6211
6212 if (info->iter.trace->use_max_tr) {
6213 tracing_buffers_release(inode, filp);
6214 return -EBUSY;
6215 }
6216
6217 info->iter.snapshot = true;
6218 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6219
6220 return ret;
6221}
6222
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006223#endif /* CONFIG_TRACER_SNAPSHOT */
6224
6225
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006226static const struct file_operations tracing_thresh_fops = {
6227 .open = tracing_open_generic,
6228 .read = tracing_thresh_read,
6229 .write = tracing_thresh_write,
6230 .llseek = generic_file_llseek,
6231};
6232
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04006233#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006234static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006235 .open = tracing_open_generic,
6236 .read = tracing_max_lat_read,
6237 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006238 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006239};
Chen Gange428abb2015-11-10 05:15:15 +08006240#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006241
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006242static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006243 .open = tracing_open_generic,
6244 .read = tracing_set_trace_read,
6245 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006246 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006247};
6248
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006249static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006250 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006251 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006252 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006253 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006254 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006255 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02006256};
6257
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006258static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006259 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02006260 .read = tracing_entries_read,
6261 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006262 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006263 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02006264};
6265
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006266static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006267 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006268 .read = tracing_total_entries_read,
6269 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006270 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006271};
6272
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006273static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006274 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006275 .write = tracing_free_buffer_write,
6276 .release = tracing_free_buffer_release,
6277};
6278
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006279static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006280 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006281 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006282 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006283 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006284};
6285
Steven Rostedtfa32e852016-07-06 15:25:08 -04006286static const struct file_operations tracing_mark_raw_fops = {
6287 .open = tracing_open_generic_tr,
6288 .write = tracing_mark_raw_write,
6289 .llseek = generic_file_llseek,
6290 .release = tracing_release_generic_tr,
6291};
6292
Zhaolei5079f322009-08-25 16:12:56 +08006293static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08006294 .open = tracing_clock_open,
6295 .read = seq_read,
6296 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006297 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08006298 .write = tracing_clock_write,
6299};
6300
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006301#ifdef CONFIG_TRACER_SNAPSHOT
6302static const struct file_operations snapshot_fops = {
6303 .open = tracing_snapshot_open,
6304 .read = seq_read,
6305 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05006306 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006307 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006308};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006309
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006310static const struct file_operations snapshot_raw_fops = {
6311 .open = snapshot_raw_open,
6312 .read = tracing_buffers_read,
6313 .release = tracing_buffers_release,
6314 .splice_read = tracing_buffers_splice_read,
6315 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006316};
6317
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006318#endif /* CONFIG_TRACER_SNAPSHOT */
6319
Steven Rostedt2cadf912008-12-01 22:20:19 -05006320static int tracing_buffers_open(struct inode *inode, struct file *filp)
6321{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006322 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006323 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006324 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006325
6326 if (tracing_disabled)
6327 return -ENODEV;
6328
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006329 if (trace_array_get(tr) < 0)
6330 return -ENODEV;
6331
Steven Rostedt2cadf912008-12-01 22:20:19 -05006332 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006333 if (!info) {
6334 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006335 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006336 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006337
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006338 mutex_lock(&trace_types_lock);
6339
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006340 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006341 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05006342 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006343 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006344 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006345 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006346 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006347
6348 filp->private_data = info;
6349
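	/* Pin the current tracer: it cannot be switched out while a buffer file holds a reference */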
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006350 tr->current_trace->ref++;
6351
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006352 mutex_unlock(&trace_types_lock);
6353
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006354 ret = nonseekable_open(inode, filp);
6355 if (ret < 0)
6356 trace_array_put(tr);
6357
6358 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006359}
6360
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006361static unsigned int
6362tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6363{
6364 struct ftrace_buffer_info *info = filp->private_data;
6365 struct trace_iterator *iter = &info->iter;
6366
6367 return trace_poll(iter, filp, poll_table);
6368}
6369
Steven Rostedt2cadf912008-12-01 22:20:19 -05006370static ssize_t
6371tracing_buffers_read(struct file *filp, char __user *ubuf,
6372 size_t count, loff_t *ppos)
6373{
6374 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006375 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006376 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006377 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006378
Steven Rostedt2dc5d122009-03-04 19:10:05 -05006379 if (!count)
6380 return 0;
6381
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006382#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006383 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6384 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006385#endif
6386
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006387 if (!info->spare) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006388 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6389 iter->cpu_file);
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006390 info->spare_cpu = iter->cpu_file;
6391 }
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006392 if (!info->spare)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006393 return -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006394
Steven Rostedt2cadf912008-12-01 22:20:19 -05006395 /* Do we have previous read data to read? */
6396 if (info->read < PAGE_SIZE)
6397 goto read;
6398
Steven Rostedtb6273442013-02-28 13:44:11 -05006399 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006400 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006401 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006402 &info->spare,
6403 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006404 iter->cpu_file, 0);
6405 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05006406
6407 if (ret < 0) {
6408 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006409 if ((filp->f_flags & O_NONBLOCK))
6410 return -EAGAIN;
6411
Rabin Vincente30f53a2014-11-10 19:46:34 +01006412 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006413 if (ret)
6414 return ret;
6415
Steven Rostedtb6273442013-02-28 13:44:11 -05006416 goto again;
6417 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006418 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05006419 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006420
Steven Rostedt436fc282011-10-14 10:44:25 -04006421 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05006422 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05006423 size = PAGE_SIZE - info->read;
6424 if (size > count)
6425 size = count;
6426
6427 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006428 if (ret == size)
6429 return -EFAULT;
6430
Steven Rostedt2dc5d122009-03-04 19:10:05 -05006431 size -= ret;
6432
Steven Rostedt2cadf912008-12-01 22:20:19 -05006433 *ppos += size;
6434 info->read += size;
6435
6436 return size;
6437}
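/*
 * Note on the read path above: a single "spare" page is borrowed from the
 * ring buffer and reused across reads; info->read tracks how much of that
 * page user space has consumed, so a short read resumes where the previous
 * one stopped before another page is pulled from the buffer.
 */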
6438
6439static int tracing_buffers_release(struct inode *inode, struct file *file)
6440{
6441 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006442 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006443
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006444 mutex_lock(&trace_types_lock);
6445
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006446 iter->tr->current_trace->ref--;
6447
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006448 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006449
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006450 if (info->spare)
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006451 ring_buffer_free_read_page(iter->trace_buffer->buffer,
6452 info->spare_cpu, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006453 kfree(info);
6454
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006455 mutex_unlock(&trace_types_lock);
6456
Steven Rostedt2cadf912008-12-01 22:20:19 -05006457 return 0;
6458}
6459
6460struct buffer_ref {
6461 struct ring_buffer *buffer;
6462 void *page;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006463 int cpu;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006464 int ref;
6465};
6466
6467static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6468 struct pipe_buffer *buf)
6469{
6470 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6471
6472 if (--ref->ref)
6473 return;
6474
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006475 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006476 kfree(ref);
6477 buf->private = 0;
6478}
6479
Steven Rostedt2cadf912008-12-01 22:20:19 -05006480static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6481 struct pipe_buffer *buf)
6482{
6483 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6484
6485 ref->ref++;
6486}
6487
6488/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08006489static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05006490 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006491 .confirm = generic_pipe_buf_confirm,
6492 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09006493 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006494 .get = buffer_pipe_buf_get,
6495};
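/*
 * Lifecycle sketch for the refcounting above: tracing_buffers_splice_read()
 * creates each buffer_ref with ref->ref = 1 when it hands a ring-buffer
 * page to the pipe; buffer_pipe_buf_get() bumps the count whenever the
 * pipe duplicates a buffer, and buffer_pipe_buf_release() (or
 * buffer_spd_release() on error) drops it, returning the page to the
 * ring buffer once the count reaches zero.
 */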
6496
6497/*
6498 * Callback from splice_to_pipe(); releases pages left at the end of the
6499 * spd in case we errored out while filling the pipe.
6500 */
6501static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6502{
6503 struct buffer_ref *ref =
6504 (struct buffer_ref *)spd->partial[i].private;
6505
6506 if (--ref->ref)
6507 return;
6508
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006509 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006510 kfree(ref);
6511 spd->partial[i].private = 0;
6512}
6513
6514static ssize_t
6515tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6516 struct pipe_inode_info *pipe, size_t len,
6517 unsigned int flags)
6518{
6519 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006520 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02006521 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6522 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05006523 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02006524 .pages = pages_def,
6525 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02006526 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006527 .flags = flags,
6528 .ops = &buffer_pipe_buf_ops,
6529 .spd_release = buffer_spd_release,
6530 };
6531 struct buffer_ref *ref;
Steven Rostedt93459c62009-04-29 00:23:13 -04006532 int entries, size, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01006533 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006534
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006535#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006536 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6537 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006538#endif
6539
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006540 if (*ppos & (PAGE_SIZE - 1))
6541 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08006542
6543 if (len & (PAGE_SIZE - 1)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006544 if (len < PAGE_SIZE)
6545 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08006546 len &= PAGE_MASK;
6547 }
6548
Al Viro1ae22932016-09-17 18:31:46 -04006549 if (splice_grow_spd(pipe, &spd))
6550 return -ENOMEM;
6551
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006552 again:
6553 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006554 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04006555
Al Viroa786c062014-04-11 12:01:03 -04006556 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05006557 struct page *page;
6558 int r;
6559
6560 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01006561 if (!ref) {
6562 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006563 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01006564 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006565
Steven Rostedt7267fa62009-04-29 00:16:21 -04006566 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006567 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006568 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006569 if (!ref->page) {
Rabin Vincent07906da2014-11-06 22:26:07 +01006570 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006571 kfree(ref);
6572 break;
6573 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006574 ref->cpu = iter->cpu_file;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006575
6576 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006577 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006578 if (r < 0) {
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006579 ring_buffer_free_read_page(ref->buffer, ref->cpu,
6580 ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006581 kfree(ref);
6582 break;
6583 }
6584
6585 /*
6586 * Zero out any leftover data; this page is going
6587 * to user land.
6588 */
6589 size = ring_buffer_page_len(ref->page);
6590 if (size < PAGE_SIZE)
6591 memset(ref->page + size, 0, PAGE_SIZE - size);
6592
6593 page = virt_to_page(ref->page);
6594
6595 spd.pages[i] = page;
6596 spd.partial[i].len = PAGE_SIZE;
6597 spd.partial[i].offset = 0;
6598 spd.partial[i].private = (unsigned long)ref;
6599 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08006600 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04006601
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006602 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006603 }
6604
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006605 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006606 spd.nr_pages = i;
6607
6608 /* did we read anything? */
6609 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01006610 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04006611 goto out;
Rabin Vincent07906da2014-11-06 22:26:07 +01006612
Al Viro1ae22932016-09-17 18:31:46 -04006613 ret = -EAGAIN;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006614 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
Al Viro1ae22932016-09-17 18:31:46 -04006615 goto out;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006616
Rabin Vincente30f53a2014-11-10 19:46:34 +01006617 ret = wait_on_pipe(iter, true);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006618 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04006619 goto out;
Rabin Vincente30f53a2014-11-10 19:46:34 +01006620
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006621 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006622 }
6623
6624 ret = splice_to_pipe(pipe, &spd);
Al Viro1ae22932016-09-17 18:31:46 -04006625out:
Eric Dumazet047fe362012-06-12 15:24:40 +02006626 splice_shrink_spd(&spd);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006627
Steven Rostedt2cadf912008-12-01 22:20:19 -05006628 return ret;
6629}
6630
6631static const struct file_operations tracing_buffers_fops = {
6632 .open = tracing_buffers_open,
6633 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006634 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006635 .release = tracing_buffers_release,
6636 .splice_read = tracing_buffers_splice_read,
6637 .llseek = no_llseek,
6638};
6639
Steven Rostedtc8d77182009-04-29 18:03:45 -04006640static ssize_t
6641tracing_stats_read(struct file *filp, char __user *ubuf,
6642 size_t count, loff_t *ppos)
6643{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006644 struct inode *inode = file_inode(filp);
6645 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006646 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006647 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006648 struct trace_seq *s;
6649 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006650 unsigned long long t;
6651 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04006652
Li Zefane4f2d102009-06-15 10:57:28 +08006653 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006654 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01006655 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04006656
6657 trace_seq_init(s);
6658
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006659 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006660 trace_seq_printf(s, "entries: %ld\n", cnt);
6661
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006662 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006663 trace_seq_printf(s, "overrun: %ld\n", cnt);
6664
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006665 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006666 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
6667
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006668 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006669 trace_seq_printf(s, "bytes: %ld\n", cnt);
6670
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09006671 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006672 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006673 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006674 usec_rem = do_div(t, USEC_PER_SEC);
6675 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
6676 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006677
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006678 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006679 usec_rem = do_div(t, USEC_PER_SEC);
6680 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
6681 } else {
6682 /* counter or tsc mode for trace_clock */
6683 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006684 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006685
6686 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006687 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006688 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006689
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006690 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07006691 trace_seq_printf(s, "dropped events: %ld\n", cnt);
6692
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006693 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05006694 trace_seq_printf(s, "read events: %ld\n", cnt);
6695
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006696 count = simple_read_from_buffer(ubuf, count, ppos,
6697 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04006698
6699 kfree(s);
6700
6701 return count;
6702}
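/*
 * Illustrative per_cpu/cpuN/stats output (values invented; the format
 * follows the trace_seq_printf() calls above, with an ns-based clock):
 *
 *   entries: 1024
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 53280
 *   oldest event ts:  5132.213424
 *   now ts:  5143.008321
 *   dropped events: 0
 *   read events: 128
 */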
6703
6704static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006705 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04006706 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006707 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006708 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04006709};
6710
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006711#ifdef CONFIG_DYNAMIC_FTRACE
6712
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006713int __weak ftrace_arch_read_dyn_info(char *buf, int size)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006714{
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006715 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006716}
6717
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006718static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006719tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006720 size_t cnt, loff_t *ppos)
6721{
Steven Rostedta26a2a22008-10-31 00:03:22 -04006722 static char ftrace_dyn_info_buffer[1024];
6723 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006724 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006725 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04006726 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006727 int r;
6728
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006729 mutex_lock(&dyn_info_mutex);
6730 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006731
Steven Rostedta26a2a22008-10-31 00:03:22 -04006732 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006733 buf[r++] = '\n';
6734
6735 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6736
6737 mutex_unlock(&dyn_info_mutex);
6738
6739 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006740}
6741
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006742static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006743 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006744 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006745 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006746};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006747#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006748
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006749#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6750static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04006751ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04006752 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04006753 void *data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006754{
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04006755 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006756}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006757
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006758static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04006759ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04006760 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04006761 void *data)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006762{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04006763 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006764 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006765
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006766 if (mapper)
6767 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006768
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006769 if (count) {
6770
6771 if (*count <= 0)
6772 return;
6773
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006774 (*count)--;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006775 }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006776
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04006777 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006778}
6779
6780static int
6781ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
6782 struct ftrace_probe_ops *ops, void *data)
6783{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04006784 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006785 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006786
6787 seq_printf(m, "%ps:", (void *)ip);
6788
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01006789 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006790
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006791 if (mapper)
6792 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
6793
6794 if (count)
6795 seq_printf(m, ":count=%ld\n", *count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006796 else
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006797 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006798
6799 return 0;
6800}
6801
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006802static int
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04006803ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04006804 unsigned long ip, void *init_data, void **data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006805{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04006806 struct ftrace_func_mapper *mapper = *data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006807
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04006808 if (!mapper) {
6809 mapper = allocate_ftrace_func_mapper();
6810 if (!mapper)
6811 return -ENOMEM;
6812 *data = mapper;
6813 }
6814
6815 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006816}
6817
6818static void
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04006819ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04006820 unsigned long ip, void *data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006821{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04006822 struct ftrace_func_mapper *mapper = data;
6823
6824 if (!ip) {
6825 if (!mapper)
6826 return;
6827 free_ftrace_func_mapper(mapper, NULL);
6828 return;
6829 }
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006830
6831 ftrace_func_mapper_remove_ip(mapper, ip);
6832}
6833
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006834static struct ftrace_probe_ops snapshot_probe_ops = {
6835 .func = ftrace_snapshot,
6836 .print = ftrace_snapshot_print,
6837};
6838
6839static struct ftrace_probe_ops snapshot_count_probe_ops = {
6840 .func = ftrace_count_snapshot,
6841 .print = ftrace_snapshot_print,
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006842 .init = ftrace_snapshot_init,
6843 .free = ftrace_snapshot_free,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006844};
6845
6846static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04006847ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006848 char *glob, char *cmd, char *param, int enable)
6849{
6850 struct ftrace_probe_ops *ops;
6851 void *count = (void *)-1;
6852 char *number;
6853 int ret;
6854
6855 /* hash funcs only work with set_ftrace_filter */
6856 if (!enable)
6857 return -EINVAL;
6858
6859 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6860
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04006861 if (glob[0] == '!')
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04006862 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006863
6864 if (!param)
6865 goto out_reg;
6866
6867 number = strsep(&param, ":");
6868
6869 if (!strlen(number))
6870 goto out_reg;
6871
6872 /*
6873 * We use the callback data field (which is a pointer)
6874 * as our counter.
6875 */
6876 ret = kstrtoul(number, 0, (unsigned long *)&count);
6877 if (ret)
6878 return ret;
6879
6880 out_reg:
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04006881 ret = register_ftrace_function_probe(glob, tr, ops, count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006882
6883 if (ret >= 0)
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04006884 alloc_snapshot(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006885
6886 return ret < 0 ? ret : 0;
6887}
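/*
 * Illustrative usage of the command parsed above, via set_ftrace_filter
 * (the function name is chosen arbitrarily):
 *
 *   echo 'schedule:snapshot'   > set_ftrace_filter  # snapshot on every hit
 *   echo 'schedule:snapshot:5' > set_ftrace_filter  # only the first 5 hits
 *   echo '!schedule:snapshot'  > set_ftrace_filter  # remove the probe
 */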
6888
6889static struct ftrace_func_command ftrace_snapshot_cmd = {
6890 .name = "snapshot",
6891 .func = ftrace_trace_snapshot_callback,
6892};
6893
Tom Zanussi38de93a2013-10-24 08:34:18 -05006894static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006895{
6896 return register_ftrace_command(&ftrace_snapshot_cmd);
6897}
6898#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05006899static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006900#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006901
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006902static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006903{
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006904 if (WARN_ON(!tr->dir))
6905 return ERR_PTR(-ENODEV);
6906
6907 /* Top directory uses NULL as the parent */
6908 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6909 return NULL;
6910
6911 /* All sub buffers have a descriptor */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006912 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006913}
6914
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006915static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6916{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006917 struct dentry *d_tracer;
6918
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006919 if (tr->percpu_dir)
6920 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006921
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006922 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05006923 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006924 return NULL;
6925
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006926 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006927
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006928 WARN_ONCE(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006929 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006930
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006931 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006932}
6933
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006934static struct dentry *
6935trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6936 void *data, long cpu, const struct file_operations *fops)
6937{
6938 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6939
6940 if (ret) /* See tracing_get_cpu() */
David Howells7682c912015-03-17 22:26:16 +00006941 d_inode(ret)->i_cdev = (void *)(cpu + 1);
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006942 return ret;
6943}
6944
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006945static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006946tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006947{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006948 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006949 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04006950 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006951
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09006952 if (!d_percpu)
6953 return;
6954
Steven Rostedtdd49a382010-10-20 21:51:26 -04006955 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006956 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01006957 if (!d_cpu) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07006958 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01006959 return;
6960 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006961
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01006962 /* per cpu trace_pipe */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006963 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02006964 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006965
6966 /* per cpu trace */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006967 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006968 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04006969
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006970 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006971 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006972
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006973 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006974 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006975
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006976 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006977 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006978
6979#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006980 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006981 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006982
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006983 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006984 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006985#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006986}
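/*
 * Resulting layout (illustrative, one directory per CPU):
 *
 *   per_cpu/cpu0/trace_pipe
 *   per_cpu/cpu0/trace
 *   per_cpu/cpu0/trace_pipe_raw
 *   per_cpu/cpu0/stats
 *   per_cpu/cpu0/buffer_size_kb
 *   per_cpu/cpu0/snapshot         (with CONFIG_TRACER_SNAPSHOT)
 *   per_cpu/cpu0/snapshot_raw     (with CONFIG_TRACER_SNAPSHOT)
 */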
6987
Steven Rostedt60a11772008-05-12 21:20:44 +02006988#ifdef CONFIG_FTRACE_SELFTEST
6989/* Let selftest have access to static functions in this file */
6990#include "trace_selftest.c"
6991#endif
6992
Steven Rostedt577b7852009-02-26 23:43:05 -05006993static ssize_t
6994trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6995 loff_t *ppos)
6996{
6997 struct trace_option_dentry *topt = filp->private_data;
6998 char *buf;
6999
7000 if (topt->flags->val & topt->opt->bit)
7001 buf = "1\n";
7002 else
7003 buf = "0\n";
7004
7005 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7006}
7007
7008static ssize_t
7009trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7010 loff_t *ppos)
7011{
7012 struct trace_option_dentry *topt = filp->private_data;
7013 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05007014 int ret;
7015
Peter Huewe22fe9b52011-06-07 21:58:27 +02007016 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7017 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05007018 return ret;
7019
Li Zefan8d18eaa2009-12-08 11:17:06 +08007020 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05007021 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08007022
7023 if (!!(topt->flags->val & topt->opt->bit) != val) {
7024 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05007025 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05007026 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08007027 mutex_unlock(&trace_types_lock);
7028 if (ret)
7029 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05007030 }
7031
7032 *ppos += cnt;
7033
7034 return cnt;
7035}
7036
7037
7038static const struct file_operations trace_options_fops = {
7039 .open = tracing_open_generic,
7040 .read = trace_options_read,
7041 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007042 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05007043};
7044
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007045/*
7046 * In order to pass in both the trace_array descriptor as well as the index
7047 * to the flag that the trace option file represents, the trace_array
7048 * has a character array of trace_flags_index[], which holds the index
7049 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7050 * The address of this character array is passed to the flag option file
7051 * read/write callbacks.
7052 *
7053 * In order to extract both the index and the trace_array descriptor,
7054 * get_tr_index() uses the following algorithm.
7055 *
7056 * idx = *ptr;
7057 *
7058 * This works because ptr holds the address of an index entry, and
7059 * each entry stores its own index (remember, index[1] == 1).
7060 *
7061 * Then, to get the trace_array descriptor, we subtract that index
7062 * from the ptr, which takes us to the start of the index array itself.
7063 *
7064 * ptr - idx == &index[0]
7065 *
7066 * Then a simple container_of() from that pointer gets us to the
7067 * trace_array descriptor.
7068 */
7069static void get_tr_index(void *data, struct trace_array **ptr,
7070 unsigned int *pindex)
7071{
7072 *pindex = *(unsigned char *)data;
7073
7074 *ptr = container_of(data - *pindex, struct trace_array,
7075 trace_flags_index);
7076}
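/*
 * Worked example with invented addresses: if tr->trace_flags_index[]
 * starts at 0x1000, entry 3 lives at 0x1003 and holds the value 3.
 * The option file for flag 3 stores (void *)0x1003 as its data, and
 * get_tr_index() recovers both halves:
 *
 *   idx = *(unsigned char *)0x1003;            // -> 3
 *   0x1003 - 3                                 // -> &trace_flags_index[0]
 *   container_of(..., struct trace_array,
 *                trace_flags_index)            // -> the owning trace_array
 */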
7077
Steven Rostedta8259072009-02-26 22:19:12 -05007078static ssize_t
7079trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7080 loff_t *ppos)
7081{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007082 void *tr_index = filp->private_data;
7083 struct trace_array *tr;
7084 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05007085 char *buf;
7086
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007087 get_tr_index(tr_index, &tr, &index);
7088
7089 if (tr->trace_flags & (1 << index))
Steven Rostedta8259072009-02-26 22:19:12 -05007090 buf = "1\n";
7091 else
7092 buf = "0\n";
7093
7094 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7095}
7096
7097static ssize_t
7098trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7099 loff_t *ppos)
7100{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007101 void *tr_index = filp->private_data;
7102 struct trace_array *tr;
7103 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05007104 unsigned long val;
7105 int ret;
7106
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007107 get_tr_index(tr_index, &tr, &index);
7108
Peter Huewe22fe9b52011-06-07 21:58:27 +02007109 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7110 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05007111 return ret;
7112
Zhaoleif2d84b62009-08-07 18:55:48 +08007113 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05007114 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04007115
7116 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007117 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04007118 mutex_unlock(&trace_types_lock);
Steven Rostedta8259072009-02-26 22:19:12 -05007119
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04007120 if (ret < 0)
7121 return ret;
7122
Steven Rostedta8259072009-02-26 22:19:12 -05007123 *ppos += cnt;
7124
7125 return cnt;
7126}
7127
Steven Rostedta8259072009-02-26 22:19:12 -05007128static const struct file_operations trace_options_core_fops = {
7129 .open = tracing_open_generic,
7130 .read = trace_options_core_read,
7131 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007132 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05007133};
7134
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007135struct dentry *trace_create_file(const char *name,
Al Virof4ae40a62011-07-24 04:33:43 -04007136 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007137 struct dentry *parent,
7138 void *data,
7139 const struct file_operations *fops)
7140{
7141 struct dentry *ret;
7142
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007143 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007144 if (!ret)
Joe Perchesa395d6a2016-03-22 14:28:09 -07007145 pr_warn("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007146
7147 return ret;
7148}
7149
7150
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007151static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05007152{
7153 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05007154
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007155 if (tr->options)
7156 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05007157
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007158 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05007159 if (IS_ERR(d_tracer))
Steven Rostedta8259072009-02-26 22:19:12 -05007160 return NULL;
7161
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007162 tr->options = tracefs_create_dir("options", d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007163 if (!tr->options) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07007164 pr_warn("Could not create tracefs directory 'options'\n");
Steven Rostedta8259072009-02-26 22:19:12 -05007165 return NULL;
7166 }
7167
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007168 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05007169}
7170
Steven Rostedt577b7852009-02-26 23:43:05 -05007171static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007172create_trace_option_file(struct trace_array *tr,
7173 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05007174 struct tracer_flags *flags,
7175 struct tracer_opt *opt)
7176{
7177 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05007178
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007179 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05007180 if (!t_options)
7181 return;
7182
7183 topt->flags = flags;
7184 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007185 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05007186
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007187 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05007188 &trace_options_fops);
7189
Steven Rostedt577b7852009-02-26 23:43:05 -05007190}
7191
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007192static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007193create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05007194{
7195 struct trace_option_dentry *topts;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007196 struct trace_options *tr_topts;
Steven Rostedt577b7852009-02-26 23:43:05 -05007197 struct tracer_flags *flags;
7198 struct tracer_opt *opts;
7199 int cnt;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007200 int i;
Steven Rostedt577b7852009-02-26 23:43:05 -05007201
7202 if (!tracer)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007203 return;
Steven Rostedt577b7852009-02-26 23:43:05 -05007204
7205 flags = tracer->flags;
7206
7207 if (!flags || !flags->opts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007208 return;
7209
7210 /*
7211 * If this is an instance, only create flags for tracers
7212 * the instance may have.
7213 */
7214 if (!trace_ok_for_array(tracer, tr))
7215 return;
7216
7217 for (i = 0; i < tr->nr_topts; i++) {
Chunyu Hud39cdd22016-03-08 21:37:01 +08007218 /* Make sure there are no duplicate flags. */
7219 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007220 return;
7221 }
Steven Rostedt577b7852009-02-26 23:43:05 -05007222
7223 opts = flags->opts;
7224
7225 for (cnt = 0; opts[cnt].name; cnt++)
7226 ;
7227
Steven Rostedt0cfe8242009-02-27 10:51:10 -05007228 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05007229 if (!topts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007230 return;
7231
7232 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7233 GFP_KERNEL);
7234 if (!tr_topts) {
7235 kfree(topts);
7236 return;
7237 }
7238
7239 tr->topts = tr_topts;
7240 tr->topts[tr->nr_topts].tracer = tracer;
7241 tr->topts[tr->nr_topts].topts = topts;
7242 tr->nr_topts++;
Steven Rostedt577b7852009-02-26 23:43:05 -05007243
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04007244 for (cnt = 0; opts[cnt].name; cnt++) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007245 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05007246 &opts[cnt]);
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04007247 WARN_ONCE(topts[cnt].entry == NULL,
7248 "Failed to create trace option: %s",
7249 opts[cnt].name);
7250 }
Steven Rostedt577b7852009-02-26 23:43:05 -05007251}
7252
Steven Rostedta8259072009-02-26 22:19:12 -05007253static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007254create_trace_option_core_file(struct trace_array *tr,
7255 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05007256{
7257 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05007258
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007259 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05007260 if (!t_options)
7261 return NULL;
7262
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007263 return trace_create_file(option, 0644, t_options,
7264 (void *)&tr->trace_flags_index[index],
7265 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05007266}
7267
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007268static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05007269{
7270 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007271 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05007272 int i;
7273
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007274 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05007275 if (!t_options)
7276 return;
7277
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007278 for (i = 0; trace_options[i]; i++) {
7279 if (top_level ||
7280 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7281 create_trace_option_core_file(tr, trace_options[i], i);
7282 }
Steven Rostedta8259072009-02-26 22:19:12 -05007283}
7284
Steven Rostedt499e5472012-02-22 15:50:28 -05007285static ssize_t
7286rb_simple_read(struct file *filp, char __user *ubuf,
7287 size_t cnt, loff_t *ppos)
7288{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04007289 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05007290 char buf[64];
7291 int r;
7292
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04007293 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05007294 r = sprintf(buf, "%d\n", r);
7295
7296 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7297}
7298
7299static ssize_t
7300rb_simple_write(struct file *filp, const char __user *ubuf,
7301 size_t cnt, loff_t *ppos)
7302{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04007303 struct trace_array *tr = filp->private_data;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007304 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05007305 unsigned long val;
7306 int ret;
7307
7308 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7309 if (ret)
7310 return ret;
7311
7312 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05007313 mutex_lock(&trace_types_lock);
7314 if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04007315 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007316 if (tr->current_trace->start)
7317 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05007318 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04007319 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007320 if (tr->current_trace->stop)
7321 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05007322 }
7323 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05007324 }
7325
7326 (*ppos)++;
7327
7328 return cnt;
7329}
7330
7331static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007332 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05007333 .read = rb_simple_read,
7334 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007335 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05007336 .llseek = default_llseek,
7337};
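/*
 * Illustrative usage: rb_simple_fops backs the per-instance "tracing_on"
 * file (assuming the usual tracefs mount point):
 *
 *   cat /sys/kernel/tracing/tracing_on        # -> "1"
 *   echo 0 > /sys/kernel/tracing/tracing_on   # stop recording; calls ->stop()
 */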

struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
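
/*
 * Resulting layout (a sketch, not normative): every trace array owns
 * trace_buffer; with CONFIG_TRACER_MAX_TRACE a shadow max_buffer is
 * allocated alongside it, kept at the minimum ring-buffer size unless
 * a boot-time snapshot was requested, and grown only when a snapshot
 * is actually taken.
 */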

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}

static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}

static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	ftrace_init_trace_array(tr);

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;
	int i;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	/* Disable all the flags that were enabled coming in */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
		if ((1 << i) & ZEROED_TRACE_FLAGS)
			set_tracer_flag(tr, 1 << i, 0);
	}

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++)
		kfree(tr->topts[i].topts);
	kfree(tr->topts);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}
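
/*
 * Instance lifetime is driven entirely from user space; an illustrative
 * session (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *	mkdir /sys/kernel/tracing/instances/foo	# -> instance_mkdir("foo")
 *	rmdir /sys/kernel/tracing/instances/foo	# -> instance_rmdir("foo")
 *
 * The rmdir fails with -EBUSY while the instance still holds references.
 */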

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_marker_raw", 0220, d_tracer,
			  tr, &tracing_mark_raw_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}

static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}
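
/*
 * Illustration: with the automount in place, a legacy command such as
 *
 *	ls /sys/kernel/debug/tracing
 *
 * transparently mounts tracefs on the debugfs "tracing" dentry the
 * first time that directory is traversed.
 */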

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot-up code
 * and expects to return the dentry of the top-level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!tracefs_initialized()) ||
	    (IS_ENABLED(CONFIG_DEBUG_FS) &&
	     WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return NULL;
}

extern struct trace_enum_map *__start_ftrace_enum_maps[];
extern struct trace_enum_map *__stop_ftrace_enum_maps[];

static void __init trace_enum_init(void)
{
	int len;

	len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
	trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
}
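
/*
 * The section boundaries above are populated by TRACE_DEFINE_ENUM()
 * users; a representative (purely illustrative) use in an event header:
 *
 *	TRACE_DEFINE_ENUM(EXAMPLE_STATE);	(EXAMPLE_STATE is hypothetical)
 *
 * which lets event format strings resolve the symbolic name instead of
 * printing the raw value.
 */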

#ifdef CONFIG_MODULES
static void trace_module_add_enums(struct module *mod)
{
	if (!mod->num_trace_enums)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
}

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
static void trace_module_remove_enums(struct module *mod)
{
	union trace_enum_map_item *map;
	union trace_enum_map_item **last = &trace_enum_maps;

	if (!mod->num_trace_enums)
		return;

	mutex_lock(&trace_enum_mutex);

	map = trace_enum_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_enum_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_enum_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_enum_mutex);
}
#else
static inline void trace_module_remove_enums(struct module *mod) { }
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_enums(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_enums(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	init_tracer_tracefs(&global_trace, d_tracer);
	ftrace_init_tracefs_toplevel(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_enum_init();

	trace_create_enum_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
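
/*
 * Both notifiers key off ftrace_dump_on_oops, normally set on the
 * kernel command line; illustrative boot parameters:
 *
 *	ftrace_dump_on_oops		# dump every CPU's buffer on oops
 *	ftrace_dump_on_oops=orig_cpu	# dump only the oopsing CPU
 */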

/*
 * printk is set to a max of 1024; we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define KERN_TRACE here so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be NUL-terminated, but we are paranoid */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
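
/*
 * ftrace_dump() is exported for kernel code that wants a crash-time
 * spill of the trace; a minimal sketch (the caller and its condition
 * are hypothetical):
 *
 *	if (device_is_wedged(dev))
 *		ftrace_dump(DUMP_ALL);	(or DUMP_ORIG for this CPU only)
 *
 * Note that the buffers are consumed and tracing is left disabled
 * afterwards.
 */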

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocate some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}

void __init trace_init(void)
{
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name is kept in an init section.
	 * This function runs as a late_initcall. If the boot tracer
	 * was never registered, clear it out here to prevent a later
	 * registration from accessing the buffer that is about to be
	 * freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall(clear_boot_tracer);