/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the entries
 * inserted during the selftest, although concurrent insertions into
 * the ring buffer, such as trace_printk(), could occur at the same
 * time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_enum_map_item;

struct trace_enum_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * from "mod" or "enum_string"
	 */
	union trace_enum_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
union trace_enum_map_item {
	struct trace_enum_map		map;
	struct trace_enum_map_head	head;
	struct trace_enum_map_tail	tail;
};

static union trace_enum_map_item *trace_enum_maps;
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

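/*
 * Convert nanoseconds to microseconds with rounding. do_div() is used
 * because it performs the 64-bit division in place and works on
 * architectures without native 64-bit divide support.
 */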
unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

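/*
 * trace_array_get - take a reference on a trace array
 * @this_tr: trace array to take a reference on
 *
 * The reference is only taken if @this_tr is still on the
 * ftrace_trace_arrays list (checked under trace_types_lock), so an
 * array that has already been removed cannot be pinned.
 * Returns 0 on success, -ENODEV if the array was not found.
 */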
int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

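/*
 * If the event call has a filter attached and @rec does not match it,
 * discard the event from the ring buffer and return 1 to tell the
 * caller that nothing was committed.
 */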
int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by the seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}

/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

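/**
 * trace_pid_write - parse a user supplied list of pids into a pid_list
 * @filtered_pids: The current pid list, or NULL if none exists yet
 * @new_pid_list: Where to store the newly allocated list
 * @ubuf: The user space buffer holding the pids to add
 * @cnt: The size of @ubuf
 *
 * Builds a new trace_pid_list from @ubuf, copying over any pids already
 * set in @filtered_pids. Returns the number of bytes consumed, or a
 * negative error code on failure.
 */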
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list)
		return -ENOMEM;

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		parser.buffer[parser.idx] = 0;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}

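/*
 * Read the current time stamp of @buf for @cpu, falling back to
 * trace_clock_local() during early boot before the buffer exists.
 */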
static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can
 * be configured at boot time and at run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to system.
 *
 * These primitives allow multi-process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

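/*
 * Fill in the common trace_entry fields (context flags, preempt count,
 * type) of a just-reserved ring buffer event.
 * __trace_buffer_lock_reserve() below combines the reservation and this
 * setup step.
 */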
static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, flags, pc);
	ent->type = type;
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct ring_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

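/*
 * Commit a reserved event to the ring buffer. If the event was placed
 * in the per-CPU temporary "buffered event" storage instead, it has to
 * be copied into the real buffer with ring_buffer_write() and the
 * temporary buffer released.
 */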
static __always_inline void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

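/*
 * Allocate the snapshot (max) buffer if it has not been allocated yet,
 * sizing it to match the main trace buffer.
 */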
static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

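/*
 * "trace_buf_size=" accepts memparse() suffixes (K, M, G), so for
 * example "trace_buf_size=1M" may be given on the kernel command line.
 */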
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};

/*
 * trace_parser_get_init - allocates and initializes the trace parser buffer
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

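/*
 * Copy up to @cnt unread bytes from the trace_seq into @buf and
 * advance the sequence's read position.
 */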
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

Rabin Vincente30f53a2014-11-10 19:46:34 +01001416static int wait_on_pipe(struct trace_iterator *iter, bool full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001417{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001418	/* Iterators are static; they should be either filled or empty */
1419 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001420 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001421
Rabin Vincente30f53a2014-11-10 19:46:34 +01001422 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1423 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001424}
1425
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001426#ifdef CONFIG_FTRACE_STARTUP_TEST
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001427static bool selftests_can_run;
1428
1429struct trace_selftests {
1430 struct list_head list;
1431 struct tracer *type;
1432};
1433
1434static LIST_HEAD(postponed_selftests);
1435
1436static int save_selftest(struct tracer *type)
1437{
1438 struct trace_selftests *selftest;
1439
1440 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1441 if (!selftest)
1442 return -ENOMEM;
1443
1444 selftest->type = type;
1445 list_add(&selftest->list, &postponed_selftests);
1446 return 0;
1447}
1448
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001449static int run_tracer_selftest(struct tracer *type)
1450{
1451 struct trace_array *tr = &global_trace;
1452 struct tracer *saved_tracer = tr->current_trace;
1453 int ret;
1454
1455 if (!type->selftest || tracing_selftest_disabled)
1456 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001457
1458 /*
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001459 * If a tracer registers early in boot up (before scheduling is
1460 * initialized and such), then do not run its selftests yet.
1461 * Instead, run it a little later in the boot process.
1462 */
1463 if (!selftests_can_run)
1464 return save_selftest(type);
1465
1466 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001467 * Run a selftest on this tracer.
1468 * Here we reset the trace buffer, and set the current
1469 * tracer to be this tracer. The tracer can then run some
1470 * internal tracing to verify that everything is in order.
1471 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001472 */
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001473 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001474
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001475 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001476
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001477#ifdef CONFIG_TRACER_MAX_TRACE
1478 if (type->use_max_tr) {
1479 /* If we expanded the buffers, make sure the max is expanded too */
1480 if (ring_buffer_expanded)
1481 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1482 RING_BUFFER_ALL_CPUS);
1483 tr->allocated_snapshot = true;
1484 }
1485#endif
1486
1487 /* the test is responsible for initializing and enabling */
1488 pr_info("Testing tracer %s: ", type->name);
1489 ret = type->selftest(type, tr);
1490 /* the test is responsible for resetting too */
1491 tr->current_trace = saved_tracer;
1492 if (ret) {
1493 printk(KERN_CONT "FAILED!\n");
1494 /* Add the warning after printing 'FAILED' */
1495 WARN_ON(1);
1496 return -1;
1497 }
1498 /* Only reset on passing, to avoid touching corrupted buffers */
1499 tracing_reset_online_cpus(&tr->trace_buffer);
1500
1501#ifdef CONFIG_TRACER_MAX_TRACE
1502 if (type->use_max_tr) {
1503 tr->allocated_snapshot = false;
1504
1505 /* Shrink the max buffer again */
1506 if (ring_buffer_expanded)
1507 ring_buffer_resize(tr->max_buffer.buffer, 1,
1508 RING_BUFFER_ALL_CPUS);
1509 }
1510#endif
1511
1512 printk(KERN_CONT "PASSED\n");
1513 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001514}
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001515
1516static __init int init_trace_selftests(void)
1517{
1518 struct trace_selftests *p, *n;
1519 struct tracer *t, **last;
1520 int ret;
1521
1522 selftests_can_run = true;
1523
1524 mutex_lock(&trace_types_lock);
1525
1526 if (list_empty(&postponed_selftests))
1527 goto out;
1528
1529 pr_info("Running postponed tracer tests:\n");
1530
1531 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1532 ret = run_tracer_selftest(p->type);
 1533		/* If the test fails, then warn and remove it from available_tracers */
1534 if (ret < 0) {
1535 WARN(1, "tracer: %s failed selftest, disabling\n",
1536 p->type->name);
1537 last = &trace_types;
1538 for (t = trace_types; t; t = t->next) {
1539 if (t == p->type) {
1540 *last = t->next;
1541 break;
1542 }
1543 last = &t->next;
1544 }
1545 }
1546 list_del(&p->list);
1547 kfree(p);
1548 }
1549
1550 out:
1551 mutex_unlock(&trace_types_lock);
1552
1553 return 0;
1554}
1555early_initcall(init_trace_selftests);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001556#else
1557static inline int run_tracer_selftest(struct tracer *type)
1558{
1559 return 0;
1560}
1561#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001562
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001563static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1564
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001565static void __init apply_trace_boot_options(void);
1566
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001567/**
1568 * register_tracer - register a tracer with the ftrace system.
 1569 * @type: the plugin for the tracer
1570 *
1571 * Register a new plugin tracer.
1572 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001573int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001574{
1575 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001576 int ret = 0;
1577
1578 if (!type->name) {
1579 pr_info("Tracer must have a name\n");
1580 return -1;
1581 }
1582
Dan Carpenter24a461d2010-07-10 12:06:44 +02001583 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08001584 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1585 return -1;
1586 }
1587
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001588 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01001589
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001590 tracing_selftest_running = true;
1591
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001592 for (t = trace_types; t; t = t->next) {
1593 if (strcmp(type->name, t->name) == 0) {
1594 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08001595 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001596 type->name);
1597 ret = -1;
1598 goto out;
1599 }
1600 }
1601
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001602 if (!type->set_flag)
1603 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08001604 if (!type->flags) {
 1605		/* allocate a dummy tracer_flags */
1606 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08001607 if (!type->flags) {
1608 ret = -ENOMEM;
1609 goto out;
1610 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08001611 type->flags->val = 0;
1612 type->flags->opts = dummy_tracer_opt;
1613 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001614 if (!type->flags->opts)
1615 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01001616
Chunyu Hud39cdd22016-03-08 21:37:01 +08001617 /* store the tracer for __set_tracer_option */
1618 type->flags->trace = type;
1619
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001620 ret = run_tracer_selftest(type);
1621 if (ret < 0)
1622 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02001623
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001624 type->next = trace_types;
1625 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001626 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02001627
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001628 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001629 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001630 mutex_unlock(&trace_types_lock);
1631
Steven Rostedtdac74942009-02-05 01:13:38 -05001632 if (ret || !default_bootup_tracer)
1633 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05001634
Li Zefanee6c2c12009-09-18 14:06:47 +08001635 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05001636 goto out_unlock;
1637
1638 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1639 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05001640 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05001641 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001642
1643 apply_trace_boot_options();
1644
Steven Rostedtdac74942009-02-05 01:13:38 -05001645	/* disable other selftests, since running this tracer would break them. */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05001646 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05001647#ifdef CONFIG_FTRACE_STARTUP_TEST
1648 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1649 type->name);
1650#endif
1651
1652 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001653 return ret;
1654}
1655
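/*
 * Usage sketch (names invented for illustration): a minimal built-in
 * tracer only needs a name plus init/reset callbacks; everything else
 * in struct tracer is optional.  register_tracer() is __init in this
 * tree, so registration must happen from boot-time code.
 *
 *	static int noop_demo_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void noop_demo_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer noop_demo __read_mostly = {
 *		.name	= "noop_demo",
 *		.init	= noop_demo_init,
 *		.reset	= noop_demo_reset,
 *	};
 *
 *	ret = register_tracer(&noop_demo);
 */
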
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001656void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001657{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001658 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001659
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001660 if (!buffer)
1661 return;
1662
Steven Rostedtf6339032009-09-04 12:35:16 -04001663 ring_buffer_record_disable(buffer);
1664
1665 /* Make sure all commits have finished */
1666 synchronize_sched();
Steven Rostedt68179682012-05-08 20:57:53 -04001667 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001668
1669 ring_buffer_record_enable(buffer);
1670}
1671
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001672void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001673{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001674 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001675 int cpu;
1676
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001677 if (!buffer)
1678 return;
1679
Steven Rostedt621968c2009-09-04 12:02:35 -04001680 ring_buffer_record_disable(buffer);
1681
1682 /* Make sure all commits have finished */
1683 synchronize_sched();
1684
Alexander Z Lam94571582013-08-02 18:36:16 -07001685 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001686
1687 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001688 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001689
1690 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001691}
1692
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001693/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001694void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001695{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001696 struct trace_array *tr;
1697
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001698 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001699 tracing_reset_online_cpus(&tr->trace_buffer);
1700#ifdef CONFIG_TRACER_MAX_TRACE
1701 tracing_reset_online_cpus(&tr->max_buffer);
1702#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001703 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001704}
1705
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001706#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001707#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001708static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001709struct saved_cmdlines_buffer {
1710 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1711 unsigned *map_cmdline_to_pid;
1712 unsigned cmdline_num;
1713 int cmdline_idx;
1714 char *saved_cmdlines;
1715};
1716static struct saved_cmdlines_buffer *savedcmd;
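
/*
 * The two map arrays above form a pid <-> slot mapping:
 * map_pid_to_cmdline[pid] gives a slot index into saved_cmdlines[]
 * (TASK_COMM_LEN bytes per slot), while map_cmdline_to_pid[idx]
 * remembers which pid owns the slot, so the old pid can be unmapped
 * when the slot is recycled.  Lookup is therefore (sketch):
 *
 *	idx = savedcmd->map_pid_to_cmdline[pid];
 *	comm = get_saved_cmdlines(idx);
 */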
Steven Rostedt25b0b442008-05-12 21:21:00 +02001717
Steven Rostedt25b0b442008-05-12 21:21:00 +02001718/* temporarily disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001719static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001720
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001721static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001722{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001723 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1724}
1725
1726static inline void set_cmdline(int idx, const char *cmdline)
1727{
1728 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1729}
1730
1731static int allocate_cmdlines_buffer(unsigned int val,
1732 struct saved_cmdlines_buffer *s)
1733{
1734 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1735 GFP_KERNEL);
1736 if (!s->map_cmdline_to_pid)
1737 return -ENOMEM;
1738
1739 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1740 if (!s->saved_cmdlines) {
1741 kfree(s->map_cmdline_to_pid);
1742 return -ENOMEM;
1743 }
1744
1745 s->cmdline_idx = 0;
1746 s->cmdline_num = val;
1747 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1748 sizeof(s->map_pid_to_cmdline));
1749 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1750 val * sizeof(*s->map_cmdline_to_pid));
1751
1752 return 0;
1753}
1754
1755static int trace_create_savedcmd(void)
1756{
1757 int ret;
1758
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001759 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001760 if (!savedcmd)
1761 return -ENOMEM;
1762
1763 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1764 if (ret < 0) {
1765 kfree(savedcmd);
1766 savedcmd = NULL;
1767 return -ENOMEM;
1768 }
1769
1770 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001771}
1772
Carsten Emdeb5130b12009-09-13 01:43:07 +02001773int is_tracing_stopped(void)
1774{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001775 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001776}
1777
Steven Rostedt0f048702008-11-05 16:05:44 -05001778/**
1779 * tracing_start - quick start of the tracer
1780 *
1781 * If tracing is enabled but was stopped by tracing_stop,
1782 * this will start the tracer back up.
1783 */
1784void tracing_start(void)
1785{
1786 struct ring_buffer *buffer;
1787 unsigned long flags;
1788
1789 if (tracing_disabled)
1790 return;
1791
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001792 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1793 if (--global_trace.stop_count) {
1794 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001795 /* Someone screwed up their debugging */
1796 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001797 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001798 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001799 goto out;
1800 }
1801
Steven Rostedta2f80712010-03-12 19:56:00 -05001802 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001803 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001804
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001805 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001806 if (buffer)
1807 ring_buffer_record_enable(buffer);
1808
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001809#ifdef CONFIG_TRACER_MAX_TRACE
1810 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001811 if (buffer)
1812 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001813#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001814
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001815 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001816
Steven Rostedt0f048702008-11-05 16:05:44 -05001817 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001818 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1819}
1820
1821static void tracing_start_tr(struct trace_array *tr)
1822{
1823 struct ring_buffer *buffer;
1824 unsigned long flags;
1825
1826 if (tracing_disabled)
1827 return;
1828
1829 /* If global, we need to also start the max tracer */
1830 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1831 return tracing_start();
1832
1833 raw_spin_lock_irqsave(&tr->start_lock, flags);
1834
1835 if (--tr->stop_count) {
1836 if (tr->stop_count < 0) {
1837 /* Someone screwed up their debugging */
1838 WARN_ON_ONCE(1);
1839 tr->stop_count = 0;
1840 }
1841 goto out;
1842 }
1843
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001844 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001845 if (buffer)
1846 ring_buffer_record_enable(buffer);
1847
1848 out:
1849 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001850}
1851
1852/**
1853 * tracing_stop - quick stop of the tracer
1854 *
1855 * Light weight way to stop tracing. Use in conjunction with
1856 * tracing_start.
1857 */
1858void tracing_stop(void)
1859{
1860 struct ring_buffer *buffer;
1861 unsigned long flags;
1862
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001863 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1864 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001865 goto out;
1866
Steven Rostedta2f80712010-03-12 19:56:00 -05001867 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001868 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001869
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001870 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001871 if (buffer)
1872 ring_buffer_record_disable(buffer);
1873
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001874#ifdef CONFIG_TRACER_MAX_TRACE
1875 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001876 if (buffer)
1877 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001878#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001879
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001880 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001881
Steven Rostedt0f048702008-11-05 16:05:44 -05001882 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001883 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1884}
1885
1886static void tracing_stop_tr(struct trace_array *tr)
1887{
1888 struct ring_buffer *buffer;
1889 unsigned long flags;
1890
1891 /* If global, we need to also stop the max tracer */
1892 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1893 return tracing_stop();
1894
1895 raw_spin_lock_irqsave(&tr->start_lock, flags);
1896 if (tr->stop_count++)
1897 goto out;
1898
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001899 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001900 if (buffer)
1901 ring_buffer_record_disable(buffer);
1902
1903 out:
1904 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001905}
1906
Ingo Molnare309b412008-05-12 21:20:51 +02001907void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001908
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001909static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001910{
Carsten Emdea635cf02009-03-18 09:00:41 +01001911 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001912
1913 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001914 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001915
1916 /*
1917 * It's not the end of the world if we don't get
1918 * the lock, but we also don't want to spin
1919 * nor do we want to disable interrupts,
1920 * so if we miss here, then better luck next time.
1921 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001922 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001923 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001924
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001925 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001926 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001927 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001928
Carsten Emdea635cf02009-03-18 09:00:41 +01001929 /*
1930 * Check whether the cmdline buffer at idx has a pid
1931 * mapped. We are going to overwrite that entry so we
1932 * need to clear the map_pid_to_cmdline. Otherwise we
1933 * would read the new comm for the old pid.
1934 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001935 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001936 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001937 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001938
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001939 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1940 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001941
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001942 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001943 }
1944
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001945 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001946
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001947 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001948
1949 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001950}
1951
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001952static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001953{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001954 unsigned map;
1955
Steven Rostedt4ca530852009-03-16 19:20:15 -04001956 if (!pid) {
1957 strcpy(comm, "<idle>");
1958 return;
1959 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001960
Steven Rostedt74bf4072010-01-25 15:11:53 -05001961 if (WARN_ON_ONCE(pid < 0)) {
1962 strcpy(comm, "<XXX>");
1963 return;
1964 }
1965
Steven Rostedt4ca530852009-03-16 19:20:15 -04001966 if (pid > PID_MAX_DEFAULT) {
1967 strcpy(comm, "<...>");
1968 return;
1969 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001970
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001971 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001972 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001973 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001974 else
1975 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001976}
1977
1978void trace_find_cmdline(int pid, char comm[])
1979{
1980 preempt_disable();
1981 arch_spin_lock(&trace_cmdline_lock);
1982
1983 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001984
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001985 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001986 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001987}
1988
Ingo Molnare309b412008-05-12 21:20:51 +02001989void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001990{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001991 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001992 return;
1993
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001994 if (!__this_cpu_read(trace_cmdline_save))
1995 return;
1996
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001997 if (trace_save_cmdline(tsk))
1998 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001999}
2000
Steven Rostedt (VMware)af0009f2017-03-16 11:01:06 -04002001/*
2002 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2003 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2004 * simplifies those functions and keeps them in sync.
2005 */
2006enum print_line_t trace_handle_return(struct trace_seq *s)
2007{
2008 return trace_seq_has_overflowed(s) ?
2009 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2010}
2011EXPORT_SYMBOL_GPL(trace_handle_return);
2012
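/*
 * A typical caller, sketched with invented names: an event's print
 * handler fills iter->seq with trace_seq_*() calls and lets this
 * helper pick the return value based on whether the seq overflowed.
 *
 *	static enum print_line_t foo_trace_print(struct trace_iterator *iter,
 *						 int flags,
 *						 struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "foo event\n");
 *		return trace_handle_return(&iter->seq);
 *	}
 */
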
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03002013void
Steven Rostedt38697052008-10-01 13:14:09 -04002014tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2015 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002016{
2017 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002018
Steven Rostedt777e2082008-09-29 23:02:42 -04002019 entry->preempt_count = pc & 0xff;
2020 entry->pid = (tsk) ? tsk->pid : 0;
2021 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04002022#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04002023 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04002024#else
2025 TRACE_FLAG_IRQS_NOSUPPORT |
2026#endif
Peter Zijlstra7e6867b2016-03-18 16:28:04 +01002027		((pc & NMI_MASK) ? TRACE_FLAG_NMI : 0) |
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002028 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
Pavankumar Kondetic59f29c2016-12-09 21:50:17 +05302029 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02002030 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2031 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002032}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02002033EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002034
Steven Rostedte77405a2009-09-02 14:17:06 -04002035struct ring_buffer_event *
2036trace_buffer_lock_reserve(struct ring_buffer *buffer,
2037 int type,
2038 unsigned long len,
2039 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002040{
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002041 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002042}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002043
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002044DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2045DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2046static int trace_buffered_event_ref;
2047
2048/**
2049 * trace_buffered_event_enable - enable buffering events
2050 *
2051 * When events are being filtered, it is quicker to use a temporary
2052 * buffer to write the event data into if there's a likely chance
 2053 * that it will not be committed. Discarding an event already in
 2054 * the ring buffer is not as fast as committing one, and is much
 2055 * slower than copying the data in as part of a single commit.
2056 *
2057 * When an event is to be filtered, allocate per cpu buffers to
2058 * write the event data into, and if the event is filtered and discarded
 2059 * it is simply dropped; otherwise, the entire data is committed
2060 * in one shot.
2061 */
2062void trace_buffered_event_enable(void)
2063{
2064 struct ring_buffer_event *event;
2065 struct page *page;
2066 int cpu;
2067
2068 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2069
2070 if (trace_buffered_event_ref++)
2071 return;
2072
2073 for_each_tracing_cpu(cpu) {
2074 page = alloc_pages_node(cpu_to_node(cpu),
2075 GFP_KERNEL | __GFP_NORETRY, 0);
2076 if (!page)
2077 goto failed;
2078
2079 event = page_address(page);
2080 memset(event, 0, sizeof(*event));
2081
2082 per_cpu(trace_buffered_event, cpu) = event;
2083
2084 preempt_disable();
2085 if (cpu == smp_processor_id() &&
2086 this_cpu_read(trace_buffered_event) !=
2087 per_cpu(trace_buffered_event, cpu))
2088 WARN_ON_ONCE(1);
2089 preempt_enable();
2090 }
2091
2092 return;
2093 failed:
2094 trace_buffered_event_disable();
2095}
2096
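/*
 * Callers pair this with trace_buffered_event_disable() below, while
 * holding event_mutex (as the WARN_ON_ONCE above enforces).  Sketch:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();
 *	... install and use the event filter ...
 *	trace_buffered_event_disable();
 *	mutex_unlock(&event_mutex);
 */
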
2097static void enable_trace_buffered_event(void *data)
2098{
2099 /* Probably not needed, but do it anyway */
2100 smp_rmb();
2101 this_cpu_dec(trace_buffered_event_cnt);
2102}
2103
2104static void disable_trace_buffered_event(void *data)
2105{
2106 this_cpu_inc(trace_buffered_event_cnt);
2107}
2108
2109/**
2110 * trace_buffered_event_disable - disable buffering events
2111 *
2112 * When a filter is removed, it is faster to not use the buffered
2113 * events, and to commit directly into the ring buffer. Free up
2114 * the temp buffers when there are no more users. This requires
2115 * special synchronization with current events.
2116 */
2117void trace_buffered_event_disable(void)
2118{
2119 int cpu;
2120
2121 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2122
2123 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2124 return;
2125
2126 if (--trace_buffered_event_ref)
2127 return;
2128
2129 preempt_disable();
2130 /* For each CPU, set the buffer as used. */
2131 smp_call_function_many(tracing_buffer_mask,
2132 disable_trace_buffered_event, NULL, 1);
2133 preempt_enable();
2134
2135 /* Wait for all current users to finish */
2136 synchronize_sched();
2137
2138 for_each_tracing_cpu(cpu) {
2139 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2140 per_cpu(trace_buffered_event, cpu) = NULL;
2141 }
2142 /*
2143 * Make sure trace_buffered_event is NULL before clearing
2144 * trace_buffered_event_cnt.
2145 */
2146 smp_wmb();
2147
2148 preempt_disable();
2149 /* Do the work on each cpu */
2150 smp_call_function_many(tracing_buffer_mask,
2151 enable_trace_buffered_event, NULL, 1);
2152 preempt_enable();
2153}
2154
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002155static struct ring_buffer *temp_buffer;
2156
Steven Rostedtef5580d2009-02-27 19:38:04 -05002157struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04002158trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002159 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002160 int type, unsigned long len,
2161 unsigned long flags, int pc)
2162{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002163 struct ring_buffer_event *entry;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002164 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002165
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002166 *current_rb = trace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002167
2168 if ((trace_file->flags &
2169 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2170 (entry = this_cpu_read(trace_buffered_event))) {
2171 /* Try to use the per cpu buffer first */
2172 val = this_cpu_inc_return(trace_buffered_event_cnt);
2173 if (val == 1) {
2174 trace_event_setup(entry, type, flags, pc);
2175 entry->array[0] = len;
2176 return entry;
2177 }
2178 this_cpu_dec(trace_buffered_event_cnt);
2179 }
2180
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002181 entry = __trace_buffer_lock_reserve(*current_rb,
2182 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002183 /*
2184 * If tracing is off, but we have triggers enabled
2185 * we still need to look at the event data. Use the temp_buffer
 2186	 * to store the trace event for the trigger to use. It's recursion
2187 * safe and will not be recorded anywhere.
2188 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002189 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002190 *current_rb = temp_buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002191 entry = __trace_buffer_lock_reserve(*current_rb,
2192 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002193 }
2194 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002195}
2196EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2197
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002198static DEFINE_SPINLOCK(tracepoint_iter_lock);
2199static DEFINE_MUTEX(tracepoint_printk_mutex);
2200
2201static void output_printk(struct trace_event_buffer *fbuffer)
2202{
2203 struct trace_event_call *event_call;
2204 struct trace_event *event;
2205 unsigned long flags;
2206 struct trace_iterator *iter = tracepoint_print_iter;
2207
2208 /* We should never get here if iter is NULL */
2209 if (WARN_ON_ONCE(!iter))
2210 return;
2211
2212 event_call = fbuffer->trace_file->event_call;
2213 if (!event_call || !event_call->event.funcs ||
2214 !event_call->event.funcs->trace)
2215 return;
2216
2217 event = &fbuffer->trace_file->event_call->event;
2218
2219 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2220 trace_seq_init(&iter->seq);
2221 iter->ent = fbuffer->entry;
2222 event_call->event.funcs->trace(iter, 0, event);
2223 trace_seq_putc(&iter->seq, 0);
2224 printk("%s", iter->seq.buffer);
2225
2226 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2227}
2228
2229int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2230 void __user *buffer, size_t *lenp,
2231 loff_t *ppos)
2232{
2233 int save_tracepoint_printk;
2234 int ret;
2235
2236 mutex_lock(&tracepoint_printk_mutex);
2237 save_tracepoint_printk = tracepoint_printk;
2238
2239 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2240
2241 /*
2242 * This will force exiting early, as tracepoint_printk
 2243	 * is always zero when tracepoint_print_iter is not allocated
2244 */
2245 if (!tracepoint_print_iter)
2246 tracepoint_printk = 0;
2247
2248 if (save_tracepoint_printk == tracepoint_printk)
2249 goto out;
2250
2251 if (tracepoint_printk)
2252 static_key_enable(&tracepoint_printk_key.key);
2253 else
2254 static_key_disable(&tracepoint_printk_key.key);
2255
2256 out:
2257 mutex_unlock(&tracepoint_printk_mutex);
2258
2259 return ret;
2260}
2261
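/*
 * This proc handler backs the kernel.tracepoint_printk sysctl, so the
 * static key can be flipped at runtime from userspace, e.g.:
 *
 *	echo 1 > /proc/sys/kernel/tracepoint_printk
 */
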
2262void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2263{
2264 if (static_key_false(&tracepoint_printk_key.key))
2265 output_printk(fbuffer);
2266
2267 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2268 fbuffer->event, fbuffer->entry,
2269 fbuffer->flags, fbuffer->pc);
2270}
2271EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2272
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002273void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2274 struct ring_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002275 struct ring_buffer_event *event,
2276 unsigned long flags, int pc,
2277 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002278{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002279 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002280
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002281 /*
2282 * If regs is not set, then skip the following callers:
2283 * trace_buffer_unlock_commit_regs
2284 * event_trigger_unlock_commit
2285 * trace_event_buffer_commit
2286 * trace_event_raw_event_sched_switch
2287 * Note, we can still get here via blktrace, wakeup tracer
2288 * and mmiotrace, but that's ok if they lose a function or
 2289	 * two. They are not that meaningful.
2290 */
2291 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002292 ftrace_trace_userstack(buffer, flags, pc);
2293}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002294
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002295/*
2296 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2297 */
2298void
2299trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2300 struct ring_buffer_event *event)
2301{
2302 __buffer_unlock_commit(buffer, event);
2303}
2304
Chunyan Zhang478409d2016-11-21 15:57:18 +08002305static void
2306trace_process_export(struct trace_export *export,
2307 struct ring_buffer_event *event)
2308{
2309 struct trace_entry *entry;
2310 unsigned int size = 0;
2311
2312 entry = ring_buffer_event_data(event);
2313 size = ring_buffer_event_length(event);
2314 export->write(entry, size);
2315}
2316
2317static DEFINE_MUTEX(ftrace_export_lock);
2318
2319static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2320
2321static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2322
2323static inline void ftrace_exports_enable(void)
2324{
2325 static_branch_enable(&ftrace_exports_enabled);
2326}
2327
2328static inline void ftrace_exports_disable(void)
2329{
2330 static_branch_disable(&ftrace_exports_enabled);
2331}
2332
2333void ftrace_exports(struct ring_buffer_event *event)
2334{
2335 struct trace_export *export;
2336
2337 preempt_disable_notrace();
2338
2339 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2340 while (export) {
2341 trace_process_export(export, event);
2342 export = rcu_dereference_raw_notrace(export->next);
2343 }
2344
2345 preempt_enable_notrace();
2346}
2347
2348static inline void
2349add_trace_export(struct trace_export **list, struct trace_export *export)
2350{
2351 rcu_assign_pointer(export->next, *list);
2352 /*
2353 * We are entering export into the list but another
2354 * CPU might be walking that list. We need to make sure
2355 * the export->next pointer is valid before another CPU sees
 2356	 * the export pointer inserted into the list.
2357 */
2358 rcu_assign_pointer(*list, export);
2359}
2360
2361static inline int
2362rm_trace_export(struct trace_export **list, struct trace_export *export)
2363{
2364 struct trace_export **p;
2365
2366 for (p = list; *p != NULL; p = &(*p)->next)
2367 if (*p == export)
2368 break;
2369
2370 if (*p != export)
2371 return -1;
2372
2373 rcu_assign_pointer(*p, (*p)->next);
2374
2375 return 0;
2376}
2377
2378static inline void
2379add_ftrace_export(struct trace_export **list, struct trace_export *export)
2380{
2381 if (*list == NULL)
2382 ftrace_exports_enable();
2383
2384 add_trace_export(list, export);
2385}
2386
2387static inline int
2388rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2389{
2390 int ret;
2391
2392 ret = rm_trace_export(list, export);
2393 if (*list == NULL)
2394 ftrace_exports_disable();
2395
2396 return ret;
2397}
2398
2399int register_ftrace_export(struct trace_export *export)
2400{
2401 if (WARN_ON_ONCE(!export->write))
2402 return -1;
2403
2404 mutex_lock(&ftrace_export_lock);
2405
2406 add_ftrace_export(&ftrace_exports_list, export);
2407
2408 mutex_unlock(&ftrace_export_lock);
2409
2410 return 0;
2411}
2412EXPORT_SYMBOL_GPL(register_ftrace_export);
2413
2414int unregister_ftrace_export(struct trace_export *export)
2415{
2416 int ret;
2417
2418 mutex_lock(&ftrace_export_lock);
2419
2420 ret = rm_ftrace_export(&ftrace_exports_list, export);
2421
2422 mutex_unlock(&ftrace_export_lock);
2423
2424 return ret;
2425}
2426EXPORT_SYMBOL_GPL(unregister_ftrace_export);
2427
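/*
 * Usage sketch for the export hooks (callback name invented; the
 * write() signature assumed here matches how trace_process_export()
 * above invokes it, with the raw entry and its length):
 *
 *	static void demo_export_write(const void *buf, unsigned int len)
 *	{
 *		... push the raw trace entry somewhere out of band ...
 *	}
 *
 *	static struct trace_export demo_export = {
 *		.write	= demo_export_write,
 *	};
 *
 *	register_ftrace_export(&demo_export);
 *	...
 *	unregister_ftrace_export(&demo_export);
 */
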
Ingo Molnare309b412008-05-12 21:20:51 +02002428void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002429trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04002430 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2431 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002432{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002433 struct trace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002434 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002435 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002436 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002437
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002438 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2439 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002440 if (!event)
2441 return;
2442 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002443 entry->ip = ip;
2444 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002445
Chunyan Zhang478409d2016-11-21 15:57:18 +08002446 if (!call_filter_check_discard(call, entry, buffer, event)) {
2447 if (static_branch_unlikely(&ftrace_exports_enabled))
2448 ftrace_exports(event);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002449 __buffer_unlock_commit(buffer, event);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002450 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002451}
2452
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002453#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002454
2455#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2456struct ftrace_stack {
2457 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2458};
2459
2460static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2461static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2462
Steven Rostedte77405a2009-09-02 14:17:06 -04002463static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05002464 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002465 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02002466{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002467 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002468 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002469 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02002470 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002471 int use_stack;
2472 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02002473
2474 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02002475 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02002476
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002477 /*
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002478	 * Add two, for this function and the call to save_stack_trace().
2479 * If regs is set, then these functions will not be in the way.
2480 */
2481 if (!regs)
2482 trace.skip += 2;
2483
2484 /*
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002485 * Since events can happen in NMIs there's no safe way to
2486 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2487 * or NMI comes in, it will just have to use the default
2488 * FTRACE_STACK_SIZE.
2489 */
2490 preempt_disable_notrace();
2491
Shan Wei82146522012-11-19 13:21:01 +08002492 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002493 /*
2494 * We don't need any atomic variables, just a barrier.
2495 * If an interrupt comes in, we don't care, because it would
2496 * have exited and put the counter back to what we want.
2497 * We just need a barrier to keep gcc from moving things
2498 * around.
2499 */
2500 barrier();
2501 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05002502 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002503 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2504
2505 if (regs)
2506 save_stack_trace_regs(regs, &trace);
2507 else
2508 save_stack_trace(&trace);
2509
2510 if (trace.nr_entries > size)
2511 size = trace.nr_entries;
2512 } else
2513 /* From now on, use_stack is a boolean */
2514 use_stack = 0;
2515
2516 size *= sizeof(unsigned long);
2517
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002518 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2519 sizeof(*entry) + size, flags, pc);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002520 if (!event)
2521 goto out;
2522 entry = ring_buffer_event_data(event);
2523
2524 memset(&entry->caller, 0, size);
2525
2526 if (use_stack)
2527 memcpy(&entry->caller, trace.entries,
2528 trace.nr_entries * sizeof(unsigned long));
2529 else {
2530 trace.max_entries = FTRACE_STACK_ENTRIES;
2531 trace.entries = entry->caller;
2532 if (regs)
2533 save_stack_trace_regs(regs, &trace);
2534 else
2535 save_stack_trace(&trace);
2536 }
2537
2538 entry->size = trace.nr_entries;
2539
Tom Zanussif306cc82013-10-24 08:34:17 -05002540 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002541 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002542
2543 out:
2544 /* Again, don't let gcc optimize things here */
2545 barrier();
Shan Wei82146522012-11-19 13:21:01 +08002546 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002547 preempt_enable_notrace();
2548
Ingo Molnarf0a920d2008-05-12 21:20:47 +02002549}
2550
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002551static inline void ftrace_trace_stack(struct trace_array *tr,
2552 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002553 unsigned long flags,
2554 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05002555{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002556 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05002557 return;
2558
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002559 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05002560}
2561
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002562void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2563 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04002564{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002565 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04002566}
2567
Steven Rostedt03889382009-12-11 09:48:22 -05002568/**
2569 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002570 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05002571 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002572void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05002573{
2574 unsigned long flags;
2575
2576 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05002577 return;
Steven Rostedt03889382009-12-11 09:48:22 -05002578
2579 local_save_flags(flags);
2580
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002581 /*
 2582	 * Skip 3 more; that seems to get us to the caller of
 2583	 * this function.
2584 */
2585 skip += 3;
2586 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2587 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05002588}
2589
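/*
 * E.g. a bare trace_dump_stack(0) records the caller's backtrace into
 * the global trace buffer; pass a positive skip when calling from a
 * helper so the helper's frames are dropped:
 *
 *	trace_dump_stack(0);
 */
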
Steven Rostedt91e86e52010-11-10 12:56:12 +01002590static DEFINE_PER_CPU(int, user_stack_count);
2591
Steven Rostedte77405a2009-09-02 14:17:06 -04002592void
2593ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02002594{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002595 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02002596 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02002597 struct userstack_entry *entry;
2598 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02002599
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002600 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02002601 return;
2602
Steven Rostedtb6345872010-03-12 20:03:30 -05002603 /*
 2604	 * NMIs cannot handle page faults, even with fixups.
 2605	 * Saving the user stack can (and often does) fault.
2606 */
2607 if (unlikely(in_nmi()))
2608 return;
2609
Steven Rostedt91e86e52010-11-10 12:56:12 +01002610 /*
2611 * prevent recursion, since the user stack tracing may
2612 * trigger other kernel events.
2613 */
2614 preempt_disable();
2615 if (__this_cpu_read(user_stack_count))
2616 goto out;
2617
2618 __this_cpu_inc(user_stack_count);
2619
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002620 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2621 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02002622 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08002623 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02002624 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02002625
Steven Rostedt48659d32009-09-11 11:36:23 -04002626 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02002627 memset(&entry->caller, 0, sizeof(entry->caller));
2628
2629 trace.nr_entries = 0;
2630 trace.max_entries = FTRACE_STACK_ENTRIES;
2631 trace.skip = 0;
2632 trace.entries = entry->caller;
2633
2634 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05002635 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002636 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002637
Li Zefan1dbd1952010-12-09 15:47:56 +08002638 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01002639 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002640 out:
2641 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02002642}
2643
Hannes Eder4fd27352009-02-10 19:44:12 +01002644#ifdef UNUSED
2645static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02002646{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002647 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02002648}
Hannes Eder4fd27352009-02-10 19:44:12 +01002649#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02002650
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002651#endif /* CONFIG_STACKTRACE */
2652
Steven Rostedt07d777f2011-09-22 14:01:55 -04002653/* created for use with alloc_percpu */
2654struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002655 int nesting;
2656 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04002657};
2658
2659static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002660
2661/*
Andy Lutomirskie2ace002016-05-26 12:00:33 -0700 * This allows for lockless recording. If we're nested too deeply, then
2663 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04002664 */
2665static char *get_trace_buf(void)
2666{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002667 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002668
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002669 if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002670 return NULL;
2671
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002672 return &buffer->buffer[buffer->nesting++][0];
2673}
2674
2675static void put_trace_buf(void)
2676{
2677 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002678}
2679
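/*
 * The pair is used bracket-style, as trace_vbprintk() below does
 * (sketch):
 *
 *	tbuffer = get_trace_buf();
 *	if (!tbuffer)
 *		goto out_nobuffer;
 *	... format at most TRACE_BUF_SIZE bytes into tbuffer ...
 *	put_trace_buf();
 *
 * Four nesting levels are reserved, presumably one each for normal,
 * softirq, irq and NMI context; nesting any deeper makes
 * get_trace_buf() return NULL.
 */
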
2680static int alloc_percpu_trace_buffer(void)
2681{
2682 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002683
2684 buffers = alloc_percpu(struct trace_buffer_struct);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002685 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2686 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002687
2688 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002689 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002690}
2691
Steven Rostedt81698832012-10-11 10:15:05 -04002692static int buffers_allocated;
2693
Steven Rostedt07d777f2011-09-22 14:01:55 -04002694void trace_printk_init_buffers(void)
2695{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002696 if (buffers_allocated)
2697 return;
2698
2699 if (alloc_percpu_trace_buffer())
2700 return;
2701
Steven Rostedt2184db42014-05-28 13:14:40 -04002702 /* trace_printk() is for debug use only. Don't use it in production. */
2703
Joe Perchesa395d6a2016-03-22 14:28:09 -07002704 pr_warn("\n");
2705 pr_warn("**********************************************************\n");
2706 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2707 pr_warn("** **\n");
2708 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2709 pr_warn("** **\n");
2710 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2711 pr_warn("** unsafe for production use. **\n");
2712 pr_warn("** **\n");
2713 pr_warn("** If you see this message and you are not debugging **\n");
2714 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2715 pr_warn("** **\n");
2716 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2717 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002718
Steven Rostedtb382ede62012-10-10 21:44:34 -04002719 /* Expand the buffers to set size */
2720 tracing_update_buffers();
2721
Steven Rostedt07d777f2011-09-22 14:01:55 -04002722 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002723
2724 /*
2725 * trace_printk_init_buffers() can be called by modules.
2726 * If that happens, then we need to start cmdline recording
2727 * directly here. If the global_trace.buffer is already
2728 * allocated here, then this was called by module code.
2729 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002730 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002731 tracing_start_cmdline_record();
2732}
2733
2734void trace_printk_start_comm(void)
2735{
2736 /* Start tracing comms if trace printk is set */
2737 if (!buffers_allocated)
2738 return;
2739 tracing_start_cmdline_record();
2740}
2741
2742static void trace_printk_start_stop_comm(int enabled)
2743{
2744 if (!buffers_allocated)
2745 return;
2746
2747 if (enabled)
2748 tracing_start_cmdline_record();
2749 else
2750 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002751}
2752
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002753/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +0100 * trace_vbprintk - write a binary message to the tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002755 *
2756 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002757int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002758{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002759 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002760 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002761 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002762 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002763 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002764 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002765 char *tbuffer;
2766 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002767
2768 if (unlikely(tracing_selftest_running || tracing_disabled))
2769 return 0;
2770
2771 /* Don't pollute graph traces with trace_vprintk internals */
2772 pause_graph_tracing();
2773
2774 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002775 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002776
Steven Rostedt07d777f2011-09-22 14:01:55 -04002777 tbuffer = get_trace_buf();
2778 if (!tbuffer) {
2779 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002780 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002781 }
2782
2783 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2784
2785 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002786 goto out;
2787
Steven Rostedt07d777f2011-09-22 14:01:55 -04002788 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002789 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002790 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002791 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2792 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002793 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002794 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002795 entry = ring_buffer_event_data(event);
2796 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002797 entry->fmt = fmt;
2798
Steven Rostedt07d777f2011-09-22 14:01:55 -04002799 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002800 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002801 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002802 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05002803 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002804
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002805out:
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002806 put_trace_buf();
2807
2808out_nobuffer:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002809 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002810 unpause_graph_tracing();
2811
2812 return len;
2813}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002814EXPORT_SYMBOL_GPL(trace_vbprintk);
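/*
 * A bprint event is deliberately tiny: it records only the caller ip,
 * a pointer to the constant format string, and the argument words that
 * vbin_printf() packed above; the format is expanded when the trace is
 * read, not when it is written. Roughly (see trace_entries.h for the
 * real layout):
 *
 *	struct bprint_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;	-- address of the caller
 *		const char		*fmt;	-- must remain valid
 *		u32			buf[];	-- packed varargs
 *	};
 *
 * This is why the binary path can only be used when the format string
 * is a compile-time constant.
 */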
2815
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002816static int
2817__trace_array_vprintk(struct ring_buffer *buffer,
2818 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002819{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002820 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002821 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002822 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002823 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002824 unsigned long flags;
2825 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002826
2827 if (tracing_disabled || tracing_selftest_running)
2828 return 0;
2829
Steven Rostedt07d777f2011-09-22 14:01:55 -04002830 /* Don't pollute graph traces with trace_vprintk internals */
2831 pause_graph_tracing();
2832
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002833 pc = preempt_count();
2834 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002835
2837 tbuffer = get_trace_buf();
2838 if (!tbuffer) {
2839 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002840 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002841 }
2842
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002843 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002844
Steven Rostedt07d777f2011-09-22 14:01:55 -04002845 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002846 size = sizeof(*entry) + len + 1;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002847 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2848 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002849 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002850 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002851 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002852 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002853
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002854 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05002855 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002856 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002857 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05002858 }
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002859
2860out:
2861 put_trace_buf();
2862
2863out_nobuffer:
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002864 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002865 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002866
2867 return len;
2868}
Steven Rostedt659372d2009-09-03 19:11:07 -04002869
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002870int trace_array_vprintk(struct trace_array *tr,
2871 unsigned long ip, const char *fmt, va_list args)
2872{
2873 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2874}
2875
2876int trace_array_printk(struct trace_array *tr,
2877 unsigned long ip, const char *fmt, ...)
2878{
2879 int ret;
2880 va_list ap;
2881
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002882 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002883 return 0;
2884
2885 va_start(ap, fmt);
2886 ret = trace_array_vprintk(tr, ip, fmt, ap);
2887 va_end(ap);
2888 return ret;
2889}
2890
2891int trace_array_printk_buf(struct ring_buffer *buffer,
2892 unsigned long ip, const char *fmt, ...)
2893{
2894 int ret;
2895 va_list ap;
2896
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002897 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002898 return 0;
2899
2900 va_start(ap, fmt);
2901 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2902 va_end(ap);
2903 return ret;
2904}
2905
Steven Rostedt659372d2009-09-03 19:11:07 -04002906int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2907{
Steven Rostedta813a152009-10-09 01:41:35 -04002908 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002909}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002910EXPORT_SYMBOL_GPL(trace_vprintk);
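/*
 * All of the printk-style entry points above funnel into the same two
 * workers. Writing into a specific instance's buffer is just (sketch;
 * "my_tr" stands for a previously looked-up trace_array):
 *
 *	trace_array_printk(my_tr, _THIS_IP_, "x=%d\n", x);
 *
 * while a plain trace_printk() ends up in trace_vbprintk() or
 * trace_vprintk() against &global_trace.
 */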
2911
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002912static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002913{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002914 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2915
Steven Rostedt5a90f572008-09-03 17:42:51 -04002916 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002917 if (buf_iter)
2918 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002919}
2920
Ingo Molnare309b412008-05-12 21:20:51 +02002921static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002922peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2923 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002924{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002925 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002926 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002927
Steven Rostedtd7690412008-10-01 00:29:53 -04002928 if (buf_iter)
2929 event = ring_buffer_iter_peek(buf_iter, ts);
2930 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002931 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002932 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002933
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002934 if (event) {
2935 iter->ent_size = ring_buffer_event_length(event);
2936 return ring_buffer_event_data(event);
2937 }
2938 iter->ent_size = 0;
2939 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002940}
Steven Rostedtd7690412008-10-01 00:29:53 -04002941
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002942static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002943__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2944 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002945{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002946 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002947 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002948 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002949 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002950 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002951 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002952 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002953 int cpu;
2954
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002955 /*
2956	 * If we are in a per_cpu trace file, don't bother iterating
2957	 * over all cpus; just peek at that cpu directly.
2958 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002959 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002960 if (ring_buffer_empty_cpu(buffer, cpu_file))
2961 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002962 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002963 if (ent_cpu)
2964 *ent_cpu = cpu_file;
2965
2966 return ent;
2967 }
2968
Steven Rostedtab464282008-05-12 21:21:00 +02002969 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002970
2971 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002972 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002973
Steven Rostedtbc21b472010-03-31 19:49:26 -04002974 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002975
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02002976 /*
2977 * Pick the entry with the smallest timestamp:
2978 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002979 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002980 next = ent;
2981 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002982 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002983 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002984 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002985 }
2986 }
2987
Steven Rostedt12b5da32012-03-27 10:43:28 -04002988 iter->ent_size = next_size;
2989
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002990 if (ent_cpu)
2991 *ent_cpu = next_cpu;
2992
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002993 if (ent_ts)
2994 *ent_ts = next_ts;
2995
Steven Rostedtbc21b472010-03-31 19:49:26 -04002996 if (missing_events)
2997 *missing_events = next_lost;
2998
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002999 return next;
3000}
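/*
 * __find_next_entry() is one step of a k-way merge: each per-cpu ring
 * buffer is already time ordered, so the globally next event is simply
 * the per-cpu head with the smallest timestamp. In pseudo-C:
 *
 *	for_each_tracing_cpu(cpu) {
 *		ent = peek_next_entry(iter, cpu, &ts, &lost);
 *		if (ent && (!best || ts < best_ts)) {
 *			best = ent;
 *			best_ts = ts;
 *		}
 *	}
 *	return best;
 */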
3001
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003002/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003003struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3004 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003005{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003006 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003007}
Ingo Molnar8c523a92008-05-12 21:20:46 +02003008
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003009/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05003010void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003011{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003012 iter->ent = __find_next_entry(iter, &iter->cpu,
3013 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003014
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003015 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003016 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003017
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003018 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003019}
3020
Ingo Molnare309b412008-05-12 21:20:51 +02003021static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003022{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003023 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003024 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003025}
3026
Ingo Molnare309b412008-05-12 21:20:51 +02003027static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003028{
3029 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003030 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003031 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003032
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003033 WARN_ON_ONCE(iter->leftover);
3034
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003035 (*pos)++;
3036
3037 /* can't go backwards */
3038 if (iter->idx > i)
3039 return NULL;
3040
3041 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05003042 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003043 else
3044 ent = iter;
3045
3046 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05003047 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003048
3049 iter->pos = *pos;
3050
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003051 return ent;
3052}
3053
Jason Wessel955b61e2010-08-05 09:22:23 -05003054void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003055{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003056 struct ring_buffer_event *event;
3057 struct ring_buffer_iter *buf_iter;
3058 unsigned long entries = 0;
3059 u64 ts;
3060
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003061 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003062
Steven Rostedt6d158a82012-06-27 20:46:14 -04003063 buf_iter = trace_buffer_iter(iter, cpu);
3064 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003065 return;
3066
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003067 ring_buffer_iter_reset(buf_iter);
3068
3069 /*
3070	 * With the max latency tracers, a reset may never have taken
3071	 * place on a cpu. This is evident when an event's timestamp is
3072	 * earlier than the start of the buffer.
3073 */
3074 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003075 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003076 break;
3077 entries++;
3078 ring_buffer_read(buf_iter, NULL);
3079 }
3080
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003081 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003082}
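/*
 * Worked example: if a max latency snapshot started at time_start =
 * 1000 and cpu2's buffer still holds events stamped 850, 920 and 1010,
 * the loop above skips the first two (skipped_entries = 2) and leaves
 * the iterator at 1010, so only events inside the measured window get
 * printed while the skipped count keeps the header statistics honest.
 */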
3083
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003084/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003085 * The current tracer is copied to avoid global locking
3086	 * all around.
3087 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003088static void *s_start(struct seq_file *m, loff_t *pos)
3089{
3090 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003091 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003092 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003093 void *p = NULL;
3094 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003095 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003096
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09003097 /*
3098	 * Copy the tracer to avoid using a global lock all around.
3099	 * iter->trace is a copy of current_trace; the name pointer can be
3100	 * compared directly instead of using strcmp(), as iter->trace->name
3101	 * will point to the same string as current_trace->name.
3102 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003103 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003104 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3105 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003106 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003107
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003108#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003109 if (iter->snapshot && iter->trace->use_max_tr)
3110 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003111#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003112
3113 if (!iter->snapshot)
3114 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003115
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003116 if (*pos != iter->pos) {
3117 iter->ent = NULL;
3118 iter->cpu = 0;
3119 iter->idx = -1;
3120
Steven Rostedtae3b5092013-01-23 15:22:59 -05003121 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003122 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003123 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003124 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003125 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003126
Lai Jiangshanac91d852010-03-02 17:54:50 +08003127 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003128 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3129 ;
3130
3131 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003132 /*
3133 * If we overflowed the seq_file before, then we want
3134 * to just reuse the trace_seq buffer again.
3135 */
3136 if (iter->leftover)
3137 p = iter;
3138 else {
3139 l = *pos - 1;
3140 p = s_next(m, p, &l);
3141 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003142 }
3143
Lai Jiangshan4f535962009-05-18 19:35:34 +08003144 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003145 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003146 return p;
3147}
3148
3149static void s_stop(struct seq_file *m, void *p)
3150{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003151 struct trace_iterator *iter = m->private;
3152
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003153#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003154 if (iter->snapshot && iter->trace->use_max_tr)
3155 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003156#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003157
3158 if (!iter->snapshot)
3159 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003160
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003161 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08003162 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003163}
3164
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003165static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003166get_total_entries(struct trace_buffer *buf,
3167 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003168{
3169 unsigned long count;
3170 int cpu;
3171
3172 *total = 0;
3173 *entries = 0;
3174
3175 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003176 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003177 /*
3178	 * If this buffer has skipped entries, then we hold all
3179	 * entries for the trace and must ignore the ones stamped
3180	 * before the buffer's start time.
3181 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003182 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3183 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003184 /* total is the same as the entries */
3185 *total += count;
3186 } else
3187 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003188 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003189 *entries += count;
3190 }
3191}
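/*
 * Worked example of the accounting above: say cpu0 holds 100 readable
 * events plus 20 that were overwritten (overrun), and cpu1 holds 50
 * events of which 10 are marked skipped. Then:
 *
 *	entries = 100 + (50 - 10)  = 140   (what the iterator can show)
 *	total   = (100 + 20) + 40  = 160   (everything accounted for)
 *
 * The skipped entries on cpu1 are still held in the buffer, so no
 * overrun is added there: total and entries match for that cpu.
 */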
3192
Ingo Molnare309b412008-05-12 21:20:51 +02003193static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003194{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003195 seq_puts(m, "# _------=> CPU# \n"
3196 "# / _-----=> irqs-off \n"
3197 "# | / _----=> need-resched \n"
3198 "# || / _---=> hardirq/softirq \n"
3199 "# ||| / _--=> preempt-depth \n"
3200 "# |||| / delay \n"
3201 "# cmd pid ||||| time | caller \n"
3202 "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003203}
3204
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003205static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003206{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003207 unsigned long total;
3208 unsigned long entries;
3209
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003210 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003211 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3212 entries, total, num_online_cpus());
3213 seq_puts(m, "#\n");
3214}
3215
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003216static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003217{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003218 print_event_info(buf, m);
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003219 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
3220 "# | | | | |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003221}
3222
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003223static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05003224{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003225 print_event_info(buf, m);
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003226 seq_puts(m, "# _-----=> irqs-off\n"
3227 "# / _----=> need-resched\n"
3228 "# | / _---=> hardirq/softirq\n"
3229 "# || / _--=> preempt-depth\n"
3230 "# ||| / delay\n"
3231 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
3232 "# | | | |||| | |\n");
Steven Rostedt77271ce2011-11-17 09:34:33 -05003233}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003234
Jiri Olsa62b915f2010-04-02 19:01:22 +02003235void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003236print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3237{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003238 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003239 struct trace_buffer *buf = iter->trace_buffer;
3240 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003241 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003242 unsigned long entries;
3243 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003244 const char *name = "preemption";
3245
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05003246 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003247
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003248 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003249
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003250 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003251 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003252 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003253 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003254 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003255 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02003256 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003257 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02003258 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003259 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003260#if defined(CONFIG_PREEMPT_NONE)
3261 "server",
3262#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3263 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04003264#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003265 "preempt",
3266#else
3267 "unknown",
3268#endif
3269 /* These are reserved for later use */
3270 0, 0, 0, 0);
3271#ifdef CONFIG_SMP
3272 seq_printf(m, " #P:%d)\n", num_online_cpus());
3273#else
3274 seq_puts(m, ")\n");
3275#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003276 seq_puts(m, "# -----------------\n");
3277 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003278 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07003279 data->comm, data->pid,
3280 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003281 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003282 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003283
3284 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003285 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003286 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3287 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003288 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003289 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3290 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04003291 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003292 }
3293
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003294 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003295}
3296
Steven Rostedta3097202008-11-07 22:36:02 -05003297static void test_cpu_buff_start(struct trace_iterator *iter)
3298{
3299 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003300 struct trace_array *tr = iter->tr;
Steven Rostedta3097202008-11-07 22:36:02 -05003301
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003302 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003303 return;
3304
3305 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3306 return;
3307
Sasha Levin919cd972015-09-04 12:45:56 -04003308 if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05003309 return;
3310
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003311 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003312 return;
3313
Sasha Levin919cd972015-09-04 12:45:56 -04003314 if (iter->started)
3315 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003316
3317 /* Don't print started cpu buffer for the first entry of the trace */
3318 if (iter->idx > 1)
3319 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3320 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05003321}
3322
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003323static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003324{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003325 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02003326 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003327 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003328 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003329 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003330
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003331 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003332
Steven Rostedta3097202008-11-07 22:36:02 -05003333 test_cpu_buff_start(iter);
3334
Steven Rostedtf633cef2008-12-23 23:24:13 -05003335 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003336
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003337 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003338 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3339 trace_print_lat_context(iter);
3340 else
3341 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003342 }
3343
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003344 if (trace_seq_has_overflowed(s))
3345 return TRACE_TYPE_PARTIAL_LINE;
3346
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003347 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003348 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003349
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003350 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003351
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003352 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003353}
3354
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003355static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003356{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003357 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003358 struct trace_seq *s = &iter->seq;
3359 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003360 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003361
3362 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003363
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003364 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003365 trace_seq_printf(s, "%d %d %llu ",
3366 entry->pid, iter->cpu, iter->ts);
3367
3368 if (trace_seq_has_overflowed(s))
3369 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003370
Steven Rostedtf633cef2008-12-23 23:24:13 -05003371 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003372 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003373 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003374
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003375 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003376
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003377 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003378}
3379
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003380static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003381{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003382 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003383 struct trace_seq *s = &iter->seq;
3384 unsigned char newline = '\n';
3385 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003386 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003387
3388 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003389
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003390 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003391 SEQ_PUT_HEX_FIELD(s, entry->pid);
3392 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3393 SEQ_PUT_HEX_FIELD(s, iter->ts);
3394 if (trace_seq_has_overflowed(s))
3395 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003396 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003397
Steven Rostedtf633cef2008-12-23 23:24:13 -05003398 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003399 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04003400 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003401 if (ret != TRACE_TYPE_HANDLED)
3402 return ret;
3403 }
Steven Rostedt7104f302008-10-01 10:52:51 -04003404
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003405 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003406
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003407 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003408}
3409
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003410static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003411{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003412 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003413 struct trace_seq *s = &iter->seq;
3414 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003415 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003416
3417 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003418
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003419 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003420 SEQ_PUT_FIELD(s, entry->pid);
3421 SEQ_PUT_FIELD(s, iter->cpu);
3422 SEQ_PUT_FIELD(s, iter->ts);
3423 if (trace_seq_has_overflowed(s))
3424 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003425 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003426
Steven Rostedtf633cef2008-12-23 23:24:13 -05003427 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04003428 return event ? event->funcs->binary(iter, 0, event) :
3429 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003430}
3431
Jiri Olsa62b915f2010-04-02 19:01:22 +02003432int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003433{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003434 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003435 int cpu;
3436
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003437 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003438 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003439 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003440 buf_iter = trace_buffer_iter(iter, cpu);
3441 if (buf_iter) {
3442 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003443 return 0;
3444 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003445 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003446 return 0;
3447 }
3448 return 1;
3449 }
3450
Steven Rostedtab464282008-05-12 21:21:00 +02003451 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04003452 buf_iter = trace_buffer_iter(iter, cpu);
3453 if (buf_iter) {
3454 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04003455 return 0;
3456 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003457 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04003458 return 0;
3459 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003460 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003461
Frederic Weisbecker797d3712008-09-30 18:13:45 +02003462 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003463}
3464
Lai Jiangshan4f535962009-05-18 19:35:34 +08003465/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05003466enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003467{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003468 struct trace_array *tr = iter->tr;
3469 unsigned long trace_flags = tr->trace_flags;
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003470 enum print_line_t ret;
3471
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003472 if (iter->lost_events) {
3473 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3474 iter->cpu, iter->lost_events);
3475 if (trace_seq_has_overflowed(&iter->seq))
3476 return TRACE_TYPE_PARTIAL_LINE;
3477 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04003478
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003479 if (iter->trace && iter->trace->print_line) {
3480 ret = iter->trace->print_line(iter);
3481 if (ret != TRACE_TYPE_UNHANDLED)
3482 return ret;
3483 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02003484
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05003485 if (iter->ent->type == TRACE_BPUTS &&
3486 trace_flags & TRACE_ITER_PRINTK &&
3487 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3488 return trace_print_bputs_msg_only(iter);
3489
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003490 if (iter->ent->type == TRACE_BPRINT &&
3491 trace_flags & TRACE_ITER_PRINTK &&
3492 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003493 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003494
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003495 if (iter->ent->type == TRACE_PRINT &&
3496 trace_flags & TRACE_ITER_PRINTK &&
3497 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003498 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003499
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003500 if (trace_flags & TRACE_ITER_BIN)
3501 return print_bin_fmt(iter);
3502
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003503 if (trace_flags & TRACE_ITER_HEX)
3504 return print_hex_fmt(iter);
3505
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003506 if (trace_flags & TRACE_ITER_RAW)
3507 return print_raw_fmt(iter);
3508
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003509 return print_trace_fmt(iter);
3510}
3511
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003512void trace_latency_header(struct seq_file *m)
3513{
3514 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003515 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003516
3517 /* print nothing if the buffers are empty */
3518 if (trace_empty(iter))
3519 return;
3520
3521 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3522 print_trace_header(m, iter);
3523
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003524 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003525 print_lat_help_header(m);
3526}
3527
Jiri Olsa62b915f2010-04-02 19:01:22 +02003528void trace_default_header(struct seq_file *m)
3529{
3530 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003531 struct trace_array *tr = iter->tr;
3532 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02003533
Jiri Olsaf56e7f82011-06-03 16:58:49 +02003534 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3535 return;
3536
Jiri Olsa62b915f2010-04-02 19:01:22 +02003537 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3538 /* print nothing if the buffers are empty */
3539 if (trace_empty(iter))
3540 return;
3541 print_trace_header(m, iter);
3542 if (!(trace_flags & TRACE_ITER_VERBOSE))
3543 print_lat_help_header(m);
3544 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05003545 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3546 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003547 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003548 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003549 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003550 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02003551 }
3552}
3553
Steven Rostedte0a413f2011-09-29 21:26:16 -04003554static void test_ftrace_alive(struct seq_file *m)
3555{
3556 if (!ftrace_is_dead())
3557 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003558 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3559 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04003560}
3561
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003562#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003563static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003564{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003565 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3566 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3567 "# Takes a snapshot of the main buffer.\n"
3568 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3569		    "#                      (Doesn't have to be '2'; works with any number that\n"
3570 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003571}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003572
3573static void show_snapshot_percpu_help(struct seq_file *m)
3574{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003575 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003576#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003577 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3578 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003579#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003580 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3581 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003582#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003583 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3584		    "#                      (Doesn't have to be '2'; works with any number that\n"
3585 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003586}
3587
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003588static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3589{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05003590 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003591 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003592 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003593 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003594
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003595 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003596 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3597 show_snapshot_main_help(m);
3598 else
3599 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003600}
3601#else
3602/* Should never be called */
3603static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3604#endif
3605
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003606static int s_show(struct seq_file *m, void *v)
3607{
3608 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003609 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003610
3611 if (iter->ent == NULL) {
3612 if (iter->tr) {
3613 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3614 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04003615 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003616 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003617 if (iter->snapshot && trace_empty(iter))
3618 print_snapshot_help(m, iter);
3619 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003620 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02003621 else
3622 trace_default_header(m);
3623
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003624 } else if (iter->leftover) {
3625 /*
3626 * If we filled the seq_file buffer earlier, we
3627 * want to just show it now.
3628 */
3629 ret = trace_print_seq(m, &iter->seq);
3630
3631 /* ret should this time be zero, but you never know */
3632 iter->leftover = ret;
3633
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003634 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003635 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003636 ret = trace_print_seq(m, &iter->seq);
3637 /*
3638 * If we overflow the seq_file buffer, then it will
3639 * ask us for this data again at start up.
3640 * Use that instead.
3641 * ret is 0 if seq_file write succeeded.
3642 * -1 otherwise.
3643 */
3644 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003645 }
3646
3647 return 0;
3648}
3649
Oleg Nesterov649e9c702013-07-23 17:25:54 +02003650/*
3651	 * Should be used after trace_array_get(); trace_types_lock
3652	 * ensures that i_cdev has already been initialized.
3653 */
3654static inline int tracing_get_cpu(struct inode *inode)
3655{
3656 if (inode->i_cdev) /* See trace_create_cpu_file() */
3657 return (long)inode->i_cdev - 1;
3658 return RING_BUFFER_ALL_CPUS;
3659}
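/*
 * The "- 1" undoes the "+ 1" done on the encode side so that a NULL
 * i_cdev keeps meaning "no cpu was encoded". The matching store in
 * trace_create_cpu_file() is, in sketch form:
 *
 *	d_inode(dentry)->i_cdev = (void *)(long)(cpu + 1);
 *
 * which keeps cpu 0 distinguishable from an uninitialized pointer.
 */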
3660
James Morris88e9d342009-09-22 16:43:43 -07003661static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003662 .start = s_start,
3663 .next = s_next,
3664 .stop = s_stop,
3665 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003666};
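/*
 * These hooks follow the standard seq_file protocol; one read pass is
 * driven by the seq_file core roughly as:
 *
 *	p = s_start(m, &pos);
 *	while (p) {
 *		s_show(m, p);
 *		p = s_next(m, p, &pos);
 *	}
 *	s_stop(m, p);
 *
 * s_start() takes trace_event_read_lock()/trace_access_lock() and
 * s_stop() drops them, so the pair brackets every pass.
 */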
3667
Ingo Molnare309b412008-05-12 21:20:51 +02003668static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02003669__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003670{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003671 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003672 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02003673 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003674
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003675 if (tracing_disabled)
3676 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02003677
Jiri Olsa50e18b92012-04-25 10:23:39 +02003678 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003679 if (!iter)
3680 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003681
Gil Fruchter72917232015-06-09 10:32:35 +03003682 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
Steven Rostedt6d158a82012-06-27 20:46:14 -04003683 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003684 if (!iter->buffer_iter)
3685 goto release;
3686
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003687 /*
3688 * We make a copy of the current tracer to avoid concurrent
3689	 * changes to it while we are reading.
3690 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003691 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003692 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003693 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003694 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003695
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003696 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003697
Li Zefan79f55992009-06-15 14:58:26 +08003698 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003699 goto fail;
3700
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003701 iter->tr = tr;
3702
3703#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003704 /* Currently only the top directory has a snapshot */
3705 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003706 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003707 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003708#endif
3709 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003710 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003711 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02003712 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003713 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003714
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003715	/* Notify the tracer early, before we stop tracing. */
3716 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01003717 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003718
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003719 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003720 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003721 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3722
David Sharp8be07092012-11-13 12:18:22 -08003723 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09003724 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08003725 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3726
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003727 /* stop the trace while dumping if we are not opening "snapshot" */
3728 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003729 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003730
Steven Rostedtae3b5092013-01-23 15:22:59 -05003731 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003732 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003733 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003734 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003735 }
3736 ring_buffer_read_prepare_sync();
3737 for_each_tracing_cpu(cpu) {
3738 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003739 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003740 }
3741 } else {
3742 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003743 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003744 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003745 ring_buffer_read_prepare_sync();
3746 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003747 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003748 }
3749
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003750 mutex_unlock(&trace_types_lock);
3751
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003752 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003753
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003754 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003755 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003756 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003757 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003758release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02003759 seq_release_private(inode, file);
3760 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003761}
3762
3763int tracing_open_generic(struct inode *inode, struct file *filp)
3764{
Steven Rostedt60a11772008-05-12 21:20:44 +02003765 if (tracing_disabled)
3766 return -ENODEV;
3767
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003768 filp->private_data = inode->i_private;
3769 return 0;
3770}
3771
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003772bool tracing_is_disabled(void)
3773{
3774	return tracing_disabled;
3775}
3776
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003777/*
3778 * Open and update trace_array ref count.
3779 * Must have the current trace_array passed to it.
3780 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003781static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003782{
3783 struct trace_array *tr = inode->i_private;
3784
3785 if (tracing_disabled)
3786 return -ENODEV;
3787
3788 if (trace_array_get(tr) < 0)
3789 return -ENODEV;
3790
3791 filp->private_data = inode->i_private;
3792
3793 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003794}
3795
static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}

static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}

static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

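/*
 * The O_TRUNC check above is what makes a plain "echo > trace" clear the
 * buffer: the shell opens the file write-only with O_TRUNC.  Opening a
 * per-cpu copy clears just that CPU.  Example (paths assume the usual
 * tracefs mount point):
 *
 *	# echo > /sys/kernel/debug/tracing/trace              # clear all CPUs
 *	# echo > /sys/kernel/debug/tracing/per_cpu/cpu0/trace # clear CPU 0
 */
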
/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}

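/*
 * In other words, a tracer is listed in an instance's available_tracers
 * only if it opted in at registration time.  A minimal sketch of such a
 * registration (the tracer itself is hypothetical):
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name		 = "example",
 *		.init		 = example_tracer_init,
 *		.allow_instances = true,
 *	};
 */
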
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_puts(m, t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

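/*
 * tracing_lseek() splits on the open mode: readers go through the normal
 * seq_file llseek, while files opened write-only (as a shell "> trace"
 * redirection does) are simply pinned to offset 0, because the write
 * side of these files does not use seq_file at all.
 */
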
static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};

/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = snprintf(mask_str, count, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	if (len >= count) {
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

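/*
 * User-space view of the above (illustrative): the mask is read and
 * written as a standard hex cpumask, e.g. on an 8-CPU machine
 *
 *	# cat /sys/kernel/debug/tracing/tracing_cpumask
 *	ff
 *	# echo 03 > /sys/kernel/debug/tracing/tracing_cpumask
 *
 * restricts tracing to CPUs 0 and 1.  cpumask_parse_user() accepts the
 * same comma-separated hex format used by the cpumask files in sysfs.
 */
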
static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};

static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (tr->trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tracer_flags->trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		tr->trace_flags |= mask;
	else
		tr->trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_EVENT_FORK)
		trace_event_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_FUNC_FORK)
		ftrace_pid_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK) {
		trace_printk_start_stop_comm(enabled);
		trace_printk_control(enabled);
	}

	return 0;
}

static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;
	size_t orig_len = strlen(option);

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	/*
	 * If the first trailing whitespace is replaced with '\0' by strstrip,
	 * turn it back into a space.
	 */
	if (orig_len > strlen(option))
		option[strlen(option)] = ' ';

	return ret;
}

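/*
 * So a write of an option name sets the flag, and the same name with a
 * "no" prefix clears it; the convention applies to tracer-specific
 * options too.  For example:
 *
 *	# echo noprint-parent > /sys/kernel/debug/tracing/trace_options
 *	# echo print-parent   > /sys/kernel/debug/tracing/trace_options
 */
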
static void __init apply_trace_boot_options(void)
{
	char *buf = trace_boot_options_buf;
	char *option;

	while (true) {
		option = strsep(&buf, ",");

		if (!option)
			break;

		if (*option)
			trace_set_options(&global_trace, option);

		/* Put back the comma to allow this to be called again */
		if (buf)
			*(buf - 1) = ',';
	}
}

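/*
 * trace_boot_options_buf is filled from the "trace_options=" kernel
 * command line parameter, so booting with, say,
 *
 *	trace_options=sym-offset,noprint-parent
 *
 * walks each comma-separated token through trace_set_options() above.
 * Note how the NUL that strsep() inserted is turned back into a comma,
 * so the buffer survives intact for any later re-parse.
 */
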
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};

static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	" trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t To clear the buffer write into this file: echo > trace\n"
	" trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	" current_tracer\t- function and latency tracers\n"
	" available_tracers\t- list of configured tracers for current_tracer\n"
	" buffer_size_kb\t- view and modify size of per cpu buffer\n"
	" buffer_total_size_kb - view total size of all cpu buffers\n\n"
4350 " trace_clock\t\t-change the clock used to order events\n"
4351 " local: Per cpu clock but may not be synced across CPUs\n"
4352 " global: Synced across CPUs but slows tracing down.\n"
4353 " counter: Not a clock, but just an increment\n"
4354 " uptime: Jiffy counter from time of boot\n"
4355 " perf: Same clock that perf events use\n"
4356#ifdef CONFIG_X86_64
4357 " x86-tsc: TSC cycle counter\n"
4358#endif
4359 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
Steven Rostedtfa32e852016-07-06 15:25:08 -04004360 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
	" tracing_cpumask\t- Limit which CPUs to trace\n"
	" instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t Remove sub-buffer with rmdir\n"
	" trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t Disable an option by adding a suffix 'no' to the\n"
	"\t\t\t option name\n"
	" saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n available_filter_functions - list of functions that can be filtered on\n"
	" set_ftrace_filter\t- echo function name in here to only trace these\n"
	"\t\t\t functions\n"
	"\t accepts: func_full_name or glob-matching-pattern\n"
	"\t modules: Can select a group via module\n"
	"\t Format: :mod:<module-name>\n"
	"\t example: echo :mod:ext3 > set_ftrace_filter\n"
	"\t triggers: a command to perform when function is hit\n"
	"\t Format: <function>:<trigger>[:count]\n"
	"\t trigger: traceon, traceoff\n"
	"\t\t enable_event:<system>:<event>\n"
	"\t\t disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t snapshot\n"
#endif
	"\t\t dump\n"
	"\t\t cpudump\n"
	"\t example: echo do_fault:traceoff > set_ftrace_filter\n"
	"\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"\t The first one will disable tracing every time do_fault is hit\n"
	"\t The second will disable tracing at most 3 times when do_trap is hit\n"
4393 "\t The first time do trap is hit and it disables tracing, the\n"
4394 "\t counter will decrement to 2. If tracing is already disabled,\n"
4395 "\t the counter will not decrement. It only decrements when the\n"
4396 "\t trigger did work\n"
4397 "\t To remove trigger without count:\n"
4398 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4399 "\t To remove trigger with a count:\n"
4400 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
	" set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t modules: Can select a group via module command :mod:\n"
	"\t Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	" set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
	"\t\t (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	" set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	" set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
	" max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
	"\t\t\t snapshot buffer. Read the contents for more\n"
	"\t\t\t information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	" stack_trace\t\t- Shows the max stack trace when active\n"
	" stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t Write into this file to reset the max size (trigger a\n"
	"\t\t\t new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	" stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
	"\t\t\t traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
#ifdef CONFIG_KPROBE_EVENTS
	" kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
	"\t\t\t Write into this file to define/undefine new trace events.\n"
#endif
#ifdef CONFIG_UPROBE_EVENTS
	" uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
	"\t\t\t Write into this file to define/undefine new trace events.\n"
#endif
#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
	"\t accepts: event-definitions (one definition per line)\n"
	"\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
	"\t -:[<group>/]<event>\n"
#ifdef CONFIG_KPROBE_EVENTS
	"\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
#endif
#ifdef CONFIG_UPROBE_EVENTS
	"\t place: <path>:<offset>\n"
#endif
	"\t args: <name>=fetcharg[:type]\n"
	"\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
	"\t $stack<index>, $stack, $retval, $comm\n"
	"\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
	"\t b<bit-width>@<bit-offset>/<container-size>\n"
#endif
	" events/\t\t- Directory containing all trace event subsystems:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	" events/<system>/\t- Directory containing all trace events for <system>:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"\t\t\t events\n"
	" filter\t\t- If set, only events passing filter are traced\n"
	" events/<system>/<event>/\t- Directory containing control files for\n"
	"\t\t\t <event>:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	" filter\t\t- If set, only events passing filter are traced\n"
	" trigger\t\t- If set, a command to perform when event is hit\n"
	"\t Format: <trigger>[:count][if <filter>]\n"
	"\t trigger: traceon, traceoff\n"
	"\t enable_event:<system>:<event>\n"
	"\t disable_event:<system>:<event>\n"
#ifdef CONFIG_HIST_TRIGGERS
	"\t enable_hist:<system>:<event>\n"
	"\t disable_hist:<system>:<event>\n"
#endif
#ifdef CONFIG_STACKTRACE
	"\t\t stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t snapshot\n"
#endif
#ifdef CONFIG_HIST_TRIGGERS
	"\t\t hist (see below)\n"
#endif
	"\t example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t events/block/block_unplug/trigger\n"
	"\t The first disables tracing every time block_unplug is hit.\n"
	"\t The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
	"\t Like function triggers, the counter is only decremented if it\n"
	"\t enabled or disabled tracing.\n"
	"\t To remove a trigger without a count:\n"
4493 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4494 "\t To remove a trigger with a count:\n"
4495 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4496 "\t Filters can be ignored when removing a trigger.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004497#ifdef CONFIG_HIST_TRIGGERS
4498 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
Tom Zanussi76a3b0c2016-03-03 12:54:44 -06004499 "\t Format: hist:keys=<field1[,field2,...]>\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004500 "\t [:values=<field1[,field2,...]>]\n"
Tom Zanussie62347d2016-03-03 12:54:45 -06004501 "\t [:sort=<field1[,field2,...]>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004502 "\t [:size=#entries]\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06004503 "\t [:pause][:continue][:clear]\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004504 "\t [:name=histname1]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004505 "\t [if <filter>]\n\n"
4506 "\t When a matching event is hit, an entry is added to a hash\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004507 "\t table using the key(s) and value(s) named, and the value of a\n"
4508 "\t sum called 'hitcount' is incremented. Keys and values\n"
4509 "\t correspond to fields in the event's format description. Keys\n"
Tom Zanussi69a02002016-03-03 12:54:52 -06004510 "\t can be any field, or the special string 'stacktrace'.\n"
4511 "\t Compound keys consisting of up to two fields can be specified\n"
4512 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4513 "\t fields. Sort keys consisting of up to two fields can be\n"
4514 "\t specified using the 'sort' keyword. The sort direction can\n"
4515 "\t be modified by appending '.descending' or '.ascending' to a\n"
4516 "\t sort field. The 'size' parameter can be used to specify more\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004517 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4518 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4519 "\t its histogram data will be shared with other triggers of the\n"
4520 "\t same name, and trigger hits will update this common data.\n\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004521 "\t Reading the 'hist' file for the event will dump the hash\n"
Tom Zanussi52a7f162016-03-03 12:54:57 -06004522 "\t table in its entirety to stdout. If there are multiple hist\n"
4523 "\t triggers attached to an event, there will be a table for each\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004524 "\t trigger in the output. The table displayed for a named\n"
4525 "\t trigger will be the same as any other instance having the\n"
4526 "\t same name. The default format used to display a given field\n"
4527 "\t can be modified by appending any of the following modifiers\n"
4528 "\t to the field name, as applicable:\n\n"
Tom Zanussic6afad42016-03-03 12:54:49 -06004529 "\t .hex display a number as a hex value\n"
4530 "\t .sym display an address as a symbol\n"
Tom Zanussi6b4827a2016-03-03 12:54:50 -06004531 "\t .sym-offset display an address as a symbol and offset\n"
Tom Zanussi31696192016-03-03 12:54:51 -06004532 "\t .execname display a common_pid as a program name\n"
4533 "\t .syscall display a syscall id as a syscall name\n\n"
	"\t .log2 display log2 value rather than raw number\n\n"
	"\t The 'pause' parameter can be used to pause an existing hist\n"
	"\t trigger or to start a hist trigger but not log any events\n"
	"\t until told to do so. 'continue' can be used to start or\n"
	"\t restart a paused hist trigger.\n\n"
	"\t The 'clear' parameter will clear the contents of a running\n"
	"\t hist trigger and leave its current paused/active state\n"
	"\t unchanged.\n\n"
	"\t The enable_hist and disable_hist triggers can be used to\n"
	"\t have one event conditionally start and stop another event's\n"
4544 "\t already-attached hist trigger. The syntax is analagous to\n"
4545 "\t the enable_event and disable_event triggers.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004546#endif
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004547;
4548
4549static ssize_t
4550tracing_readme_read(struct file *filp, char __user *ubuf,
4551 size_t cnt, loff_t *ppos)
4552{
4553 return simple_read_from_buffer(ubuf, cnt, ppos,
4554 readme_msg, strlen(readme_msg));
4555}
4556
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004557static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02004558 .open = tracing_open_generic,
4559 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004560 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004561};
4562
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004563static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04004564{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004565 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004566
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004567 if (*pos || m->count)
4568 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004569
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004570 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004571
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004572 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4573 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004574 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04004575 continue;
4576
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004577 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004578 }
4579
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004580 return NULL;
4581}
Avadh Patel69abe6a2009-04-10 16:04:48 -04004582
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004583static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4584{
4585 void *v;
4586 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004587
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004588 preempt_disable();
4589 arch_spin_lock(&trace_cmdline_lock);
4590
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004591 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004592 while (l <= *pos) {
4593 v = saved_cmdlines_next(m, v, &l);
4594 if (!v)
4595 return NULL;
4596 }
4597
4598 return v;
4599}
4600
4601static void saved_cmdlines_stop(struct seq_file *m, void *v)
4602{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004603 arch_spin_unlock(&trace_cmdline_lock);
4604 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004605}
4606
4607static int saved_cmdlines_show(struct seq_file *m, void *v)
4608{
4609 char buf[TASK_COMM_LEN];
4610 unsigned int *pid = v;
4611
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004612 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004613 seq_printf(m, "%d %s\n", *pid, buf);
4614 return 0;
4615}
4616
4617static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4618 .start = saved_cmdlines_start,
4619 .next = saved_cmdlines_next,
4620 .stop = saved_cmdlines_stop,
4621 .show = saved_cmdlines_show,
4622};
4623
4624static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4625{
4626 if (tracing_disabled)
4627 return -ENODEV;
4628
4629 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04004630}
4631
4632static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004633 .open = tracing_saved_cmdlines_open,
4634 .read = seq_read,
4635 .llseek = seq_lseek,
4636 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04004637};
4638
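/*
 * saved_cmdlines maps recorded pids back to task names, so per
 * saved_cmdlines_show() above a read yields "<pid> <comm>" pairs, e.g.
 * (actual output depends entirely on the workload):
 *
 *	# cat /sys/kernel/debug/tracing/saved_cmdlines
 *	1 systemd
 *	712 sshd
 */
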
static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry, and no more than PID_MAX_DEFAULT */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};

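/*
 * The resize above is a swap-under-lock: the new buffer is fully built
 * first, the savedcmd pointer is flipped while holding
 * trace_cmdline_lock, and only then is the old buffer freed outside the
 * lock.  Growing the table from user space is just:
 *
 *	# echo 1024 > /sys/kernel/debug/tracing/saved_cmdlines_size
 */
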
#ifdef CONFIG_TRACE_ENUM_MAP_FILE
static union trace_enum_map_item *
update_enum_map(union trace_enum_map_item *ptr)
{
	if (!ptr->map.enum_string) {
		if (ptr->tail.next) {
			ptr = ptr->tail.next;
			/* Set ptr to the next real item (skip head) */
			ptr++;
		} else
			return NULL;
	}
	return ptr;
}

static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
{
	union trace_enum_map_item *ptr = v;

	/*
	 * Paranoid! If ptr points to end, we don't want to increment past it.
	 * This really should never happen.
	 */
	ptr = update_enum_map(ptr);
	if (WARN_ON_ONCE(!ptr))
		return NULL;

	ptr++;

	(*pos)++;

	ptr = update_enum_map(ptr);

	return ptr;
}

static void *enum_map_start(struct seq_file *m, loff_t *pos)
{
	union trace_enum_map_item *v;
	loff_t l = 0;

	mutex_lock(&trace_enum_mutex);

	v = trace_enum_maps;
	if (v)
		v++;

	while (v && l < *pos) {
		v = enum_map_next(m, v, &l);
	}

	return v;
}

static void enum_map_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&trace_enum_mutex);
}

static int enum_map_show(struct seq_file *m, void *v)
{
	union trace_enum_map_item *ptr = v;

	seq_printf(m, "%s %ld (%s)\n",
		   ptr->map.enum_string, ptr->map.enum_value,
		   ptr->map.system);

	return 0;
}

static const struct seq_operations tracing_enum_map_seq_ops = {
	.start		= enum_map_start,
	.next		= enum_map_next,
	.stop		= enum_map_stop,
	.show		= enum_map_show,
};

static int tracing_enum_map_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_enum_map_seq_ops);
}

static const struct file_operations tracing_enum_map_fops = {
	.open		= tracing_enum_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static inline union trace_enum_map_item *
trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
{
	/* Return tail of array given the head */
	return ptr + ptr->head.length + 1;
}

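/*
 * Rough layout of one trace_enum_maps segment, as built by
 * trace_insert_enum_map_file() below (diagram is illustrative only):
 *
 *	[ head: mod, length ][ map 0 ][ map 1 ] ... [ map N-1 ][ tail: next ]
 *	                                                             |
 *	                            next segment's head  <-----------+
 *
 * trace_enum_jmp_to_tail() skips from a head to its tail, and
 * update_enum_map() follows tail.next into the next segment while
 * stepping over that segment's head item.
 */
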
static void
trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
			   int len)
{
	struct trace_enum_map **stop;
	struct trace_enum_map **map;
	union trace_enum_map_item *map_array;
	union trace_enum_map_item *ptr;

	stop = start + len;

	/*
	 * The trace_enum_maps contains the map plus a head and tail item,
	 * where the head holds the module and length of array, and the
	 * tail holds a pointer to the next list.
	 */
	map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
	if (!map_array) {
		pr_warn("Unable to allocate trace enum mapping\n");
		return;
	}

	mutex_lock(&trace_enum_mutex);

	if (!trace_enum_maps)
		trace_enum_maps = map_array;
	else {
		ptr = trace_enum_maps;
		for (;;) {
			ptr = trace_enum_jmp_to_tail(ptr);
			if (!ptr->tail.next)
				break;
			ptr = ptr->tail.next;
		}
		ptr->tail.next = map_array;
	}
	map_array->head.mod = mod;
	map_array->head.length = len;
	map_array++;

	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
		map_array->map = **map;
		map_array++;
	}
	memset(map_array, 0, sizeof(*map_array));

	mutex_unlock(&trace_enum_mutex);
}

static void trace_create_enum_file(struct dentry *d_tracer)
{
	trace_create_file("enum_map", 0444, d_tracer,
			  NULL, &tracing_enum_map_fops);
}

#else /* CONFIG_TRACE_ENUM_MAP_FILE */
static inline void trace_create_enum_file(struct dentry *d_tracer) { }
static inline void trace_insert_enum_map_file(struct module *mod,
			      struct trace_enum_map **start, int len) { }
#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */

static void trace_insert_enum_map(struct module *mod,
				  struct trace_enum_map **start, int len)
{
	struct trace_enum_map **map;

	if (len <= 0)
		return;

	map = start;

	trace_event_enum_update(map, len);

	trace_insert_enum_map_file(mod, start, len);
}

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's buffer to the size of @size_buf's entries */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
4984 * to reset the main buffer to the original size, we
4985 * failed there too. This is very unlikely to
4986 * happen, but if it does, warn and kill all
4987 * tracing.
4988 */
Steven Rostedt73c51622009-03-11 13:42:01 -04004989 WARN_ON(1);
4990 tracing_disabled = 1;
4991 }
4992 return ret;
4993 }
4994
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004995 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004996 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004997 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004998 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004999
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005000 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005001#endif /* CONFIG_TRACER_MAX_TRACE */
5002
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005003 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005004 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005005 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005006 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04005007
5008 return ret;
5009}
5010
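/*
 * Locked wrapper around __tracing_resize_ring_buffer(): takes
 * trace_types_lock for the resize and, for a per-cpu request, rejects
 * CPUs missing from tracing_buffer_mask with -EINVAL. A failed resize
 * is reported as -ENOMEM.
 */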
static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is configured in but never used, the
 * ring buffers are set to a minimum size. Once a user starts to use
 * the tracing facility, they need to grow to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct trace_option_dentry;

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	create_trace_option_files(tr, t);
}

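/*
 * Switch @tr to the tracer named @buf: expand the ring buffer if it is
 * still at its boot-time minimum, tear down the old tracer, manage the
 * snapshot ("max") buffer for latency tracers, and initialize the new
 * tracer. Returns 0 on success or a negative errno.
 */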
static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called with interrupts disabled,
		 * so a synchronize_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}

	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}

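/*
 * Common helpers for the latency files below: the values are kept in
 * nanoseconds internally but are read and written as microseconds.
 */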
static ssize_t
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}

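/*
 * Handlers for the tracing_thresh file. The threshold is a single
 * global value; a write also gives the current tracer a chance to
 * react through its optional update_thresh() callback.
 */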
static ssize_t
tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
}

static ssize_t
tracing_thresh_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	int ret;

	mutex_lock(&trace_types_lock);
	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
	if (ret < 0)
		goto out;

	if (tr->current_trace->update_thresh) {
		ret = tr->current_trace->update_thresh(tr);
		if (ret < 0)
			goto out;
	}

	ret = cnt;
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}

#endif

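/*
 * Open handler for trace_pipe: allocate the consuming iterator, pin the
 * trace_array, and bump the current tracer's ref count so the tracer
 * cannot be switched away while the pipe is open.
 */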
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	trace_seq_init(&iter->seq);
	iter->trace = tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->trace_buffer = &tr->trace_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);

	tr->current_trace->ref++;
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	/*
	 * iter->trace points at the live tracer itself, not a private
	 * copy, so it must not be freed here.
	 */
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	tr->current_trace->ref--;

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}

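/*
 * Common poll logic for the pipe readers: a static buffer iterator is
 * always readable; otherwise readiness either is forced by
 * TRACE_ITER_BLOCK or comes from the ring buffer's own poll support.
 */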
static unsigned int
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	struct trace_array *tr = iter->tr;

	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return POLLIN | POLLRDNORM;

	if (tr->trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}

/* Must be called with iter->mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/*
		 * We block until we have read something and tracing has
		 * been disabled. If tracing is disabled but we have never
		 * read anything, we keep blocking. This allows a user to
		 * cat this file, and then enable tracing. But after we
		 * have read something, we give an EOF when tracing is
		 * again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracing_is_on() && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, false);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;
	}

	return 1;
}

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	ssize_t sret;

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of trace coherency: the ring buffer
	 * itself is protected.
	 */
	mutex_lock(&iter->mutex);

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		goto out;

	trace_seq_init(&iter->seq);

	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.seq.len = save_len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= generic_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};

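/*
 * Fill iter->seq with up to one page of formatted trace output for the
 * splice path, consuming entries as they are printed. Returns how much
 * of the requested length is still left to produce.
 */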
static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

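/*
 * splice() support for trace_pipe: fill freshly allocated pages from
 * the iterator, one page of formatted output at a time, and hand them
 * to the pipe without an extra copy through user space.
 */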
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	if (i)
		ret = splice_to_pipe(pipe, &spd);
	else
		ret = 0;
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}

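/*
 * Read handler for buffer_size_kb: reports the ring buffer size in KB
 * for one CPU, the common size when every CPU agrees, or "X" when the
 * per-cpu sizes differ.
 */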
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written, this function
	 * is just to make sure that there is no error when "echo" is used
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing ? */
	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}

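/*
 * Write handler for trace_marker: the user string is copied straight
 * into a TRACE_PRINT event in the ring buffer. The event is reserved
 * before the copy, so a faulting copy is recorded as "<faulted>"
 * rather than aborting.
 */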
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	const char faulted[] = "<faulted>";
	ssize_t written;
	int size;
	int len;

/* Used in tracing_mark_raw_write() as well */
#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */

	/* If less than "<faulted>", then make sure we can still add that */
	if (cnt < FAULTED_SIZE)
		size += FAULTED_SIZE - cnt;

	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    irq_flags, preempt_count());
	if (unlikely(!event))
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
	if (len) {
		memcpy(&entry->buf, faulted, FAULTED_SIZE);
		cnt = FAULTED_SIZE;
		written = -EFAULT;
	} else
		written = cnt;
	len = cnt;

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	if (written > 0)
		*fpos += written;

	return written;
}

/* Limit it for now to 3K (including tag) */
#define RAW_DATA_MAX_SIZE (1024*3)

static ssize_t
tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
		       size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct raw_data_entry *entry;
	const char faulted[] = "<faulted>";
	unsigned long irq_flags;
	ssize_t written;
	int size;
	int len;

#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	/* The marker must at least have a tag id */
	if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt;
	if (cnt < FAULT_SIZE_ID)
		size += FAULT_SIZE_ID - cnt;

	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
					    irq_flags, preempt_count());
	if (!event)
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);

	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
	if (len) {
		entry->id = -1;
		memcpy(&entry->buf, faulted, FAULTED_SIZE);
		written = -EFAULT;
	} else
		written = cnt;

	__buffer_unlock_commit(buffer, event);

	if (written > 0)
		*fpos += written;

	return written;
}

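/*
 * The trace_clock file: reading lists the available clocks with the
 * current one in brackets; writing a clock name switches the buffers to
 * it and resets them, since timestamps of different clocks are not
 * comparable.
 */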
static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

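/*
 * Write handler for the snapshot file: "0" frees the snapshot buffer
 * (only via the all-CPUs file), "1" allocates it if needed and swaps it
 * with the live buffer, and any other value clears the snapshot's
 * contents.
 */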
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
					   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */

Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006219static const struct file_operations tracing_thresh_fops = {
6220 .open = tracing_open_generic,
6221 .read = tracing_thresh_read,
6222 .write = tracing_thresh_write,
6223 .llseek = generic_file_llseek,
6224};
6225
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04006226#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006227static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006228 .open = tracing_open_generic,
6229 .read = tracing_max_lat_read,
6230 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006231 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006232};
Chen Gange428abb2015-11-10 05:15:15 +08006233#endif
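/*
 * Usage sketch (illustrative; values are in microseconds and paths assume
 * the usual tracefs mount) for the files these fops back:
 *
 *   echo 100 > /sys/kernel/debug/tracing/tracing_thresh
 *   cat /sys/kernel/debug/tracing/tracing_max_latency
 */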
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006234
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006235static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006236 .open = tracing_open_generic,
6237 .read = tracing_set_trace_read,
6238 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006239 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006240};
6241
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006242static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006243 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006244 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006245 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006246 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006247 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006248 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02006249};
6250
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006251static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006252 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02006253 .read = tracing_entries_read,
6254 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006255 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006256 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02006257};
6258
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006259static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006260 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006261 .read = tracing_total_entries_read,
6262 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006263 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006264};
6265
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006266static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006267 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006268 .write = tracing_free_buffer_write,
6269 .release = tracing_free_buffer_release,
6270};
6271
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006272static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006273 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006274 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006275 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006276 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006277};
6278
Steven Rostedtfa32e852016-07-06 15:25:08 -04006279static const struct file_operations tracing_mark_raw_fops = {
6280 .open = tracing_open_generic_tr,
6281 .write = tracing_mark_raw_write,
6282 .llseek = generic_file_llseek,
6283 .release = tracing_release_generic_tr,
6284};
6285
Zhaolei5079f322009-08-25 16:12:56 +08006286static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08006287 .open = tracing_clock_open,
6288 .read = seq_read,
6289 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006290 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08006291 .write = tracing_clock_write,
6292};
6293
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006294#ifdef CONFIG_TRACER_SNAPSHOT
6295static const struct file_operations snapshot_fops = {
6296 .open = tracing_snapshot_open,
6297 .read = seq_read,
6298 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05006299 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006300 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006301};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006302
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006303static const struct file_operations snapshot_raw_fops = {
6304 .open = snapshot_raw_open,
6305 .read = tracing_buffers_read,
6306 .release = tracing_buffers_release,
6307 .splice_read = tracing_buffers_splice_read,
6308 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006309};
6310
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006311#endif /* CONFIG_TRACER_SNAPSHOT */
6312
Steven Rostedt2cadf912008-12-01 22:20:19 -05006313static int tracing_buffers_open(struct inode *inode, struct file *filp)
6314{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006315 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006316 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006317 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006318
6319 if (tracing_disabled)
6320 return -ENODEV;
6321
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006322 if (trace_array_get(tr) < 0)
6323 return -ENODEV;
6324
Steven Rostedt2cadf912008-12-01 22:20:19 -05006325 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006326 if (!info) {
6327 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006328 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006329 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006330
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006331 mutex_lock(&trace_types_lock);
6332
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006333 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006334 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05006335 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006336 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006337 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006338 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006339 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006340
6341 filp->private_data = info;
6342
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006343 tr->current_trace->ref++;
6344
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006345 mutex_unlock(&trace_types_lock);
6346
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006347 ret = nonseekable_open(inode, filp);
6348 if (ret < 0)
6349 trace_array_put(tr);
6350
6351 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006352}
6353
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006354static unsigned int
6355tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6356{
6357 struct ftrace_buffer_info *info = filp->private_data;
6358 struct trace_iterator *iter = &info->iter;
6359
6360 return trace_poll(iter, filp, poll_table);
6361}
6362
Steven Rostedt2cadf912008-12-01 22:20:19 -05006363static ssize_t
6364tracing_buffers_read(struct file *filp, char __user *ubuf,
6365 size_t count, loff_t *ppos)
6366{
6367 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006368 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006369 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006370 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006371
Steven Rostedt2dc5d122009-03-04 19:10:05 -05006372 if (!count)
6373 return 0;
6374
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006375#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006376 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6377 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006378#endif
6379
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006380 if (!info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006381 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6382 iter->cpu_file);
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006383 if (!info->spare)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006384 return -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006385
Steven Rostedt2cadf912008-12-01 22:20:19 -05006386 /* Do we have previous read data to read? */
6387 if (info->read < PAGE_SIZE)
6388 goto read;
6389
Steven Rostedtb6273442013-02-28 13:44:11 -05006390 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006391 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006392 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006393 &info->spare,
6394 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006395 iter->cpu_file, 0);
6396 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05006397
6398 if (ret < 0) {
6399 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006400 if ((filp->f_flags & O_NONBLOCK))
6401 return -EAGAIN;
6402
Rabin Vincente30f53a2014-11-10 19:46:34 +01006403 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006404 if (ret)
6405 return ret;
6406
Steven Rostedtb6273442013-02-28 13:44:11 -05006407 goto again;
6408 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006409 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05006410 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006411
Steven Rostedt436fc282011-10-14 10:44:25 -04006412 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05006413 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05006414 size = PAGE_SIZE - info->read;
6415 if (size > count)
6416 size = count;
6417
6418 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006419 if (ret == size)
6420 return -EFAULT;
6421
Steven Rostedt2dc5d122009-03-04 19:10:05 -05006422 size -= ret;
6423
Steven Rostedt2cadf912008-12-01 22:20:19 -05006424 *ppos += size;
6425 info->read += size;
6426
6427 return size;
6428}
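/*
 * Usage sketch: this handler backs the per-CPU trace_pipe_raw files (wired
 * up in tracing_init_tracefs_percpu() below), which hand out raw ring-buffer
 * pages. A common pattern is page-sized reads, e.g.
 *
 *   dd if=/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw \
 *      of=cpu0.raw bs=4096
 *
 * (Illustrative only; bs=4096 assumes a 4K PAGE_SIZE.)
 */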
6429
6430static int tracing_buffers_release(struct inode *inode, struct file *file)
6431{
6432 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006433 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006434
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006435 mutex_lock(&trace_types_lock);
6436
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006437 iter->tr->current_trace->ref--;
6438
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006439 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006440
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006441 if (info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006442 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006443 kfree(info);
6444
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006445 mutex_unlock(&trace_types_lock);
6446
Steven Rostedt2cadf912008-12-01 22:20:19 -05006447 return 0;
6448}
6449
6450struct buffer_ref {
6451 struct ring_buffer *buffer;
6452 void *page;
6453 int ref;
6454};
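/*
 * Lifecycle sketch (summarizing the helpers below): every page handed to
 * splice carries one buffer_ref. buffer_pipe_buf_get() bumps ->ref when the
 * pipe duplicates a buffer; buffer_pipe_buf_release() and buffer_spd_release()
 * drop it, and whoever drops the last reference returns the page to the
 * ring buffer and frees the ref.
 */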
6455
6456static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6457 struct pipe_buffer *buf)
6458{
6459 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6460
6461 if (--ref->ref)
6462 return;
6463
6464 ring_buffer_free_read_page(ref->buffer, ref->page);
6465 kfree(ref);
6466 buf->private = 0;
6467}
6468
Steven Rostedt2cadf912008-12-01 22:20:19 -05006469static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6470 struct pipe_buffer *buf)
6471{
6472 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6473
6474 ref->ref++;
6475}
6476
6477/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08006478static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05006479 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006480 .confirm = generic_pipe_buf_confirm,
6481 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09006482 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006483 .get = buffer_pipe_buf_get,
6484};
6485
6486/*
6487 * Callback from splice_to_pipe(); called if we need to release some
6488 * pages at the end of the spd because we errored out while filling the pipe.
6489 */
6490static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6491{
6492 struct buffer_ref *ref =
6493 (struct buffer_ref *)spd->partial[i].private;
6494
6495 if (--ref->ref)
6496 return;
6497
6498 ring_buffer_free_read_page(ref->buffer, ref->page);
6499 kfree(ref);
6500 spd->partial[i].private = 0;
6501}
6502
6503static ssize_t
6504tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6505 struct pipe_inode_info *pipe, size_t len,
6506 unsigned int flags)
6507{
6508 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006509 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02006510 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6511 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05006512 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02006513 .pages = pages_def,
6514 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02006515 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006516 .flags = flags,
6517 .ops = &buffer_pipe_buf_ops,
6518 .spd_release = buffer_spd_release,
6519 };
6520 struct buffer_ref *ref;
Steven Rostedt93459c62009-04-29 00:23:13 -04006521 int entries, size, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01006522 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006523
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006524#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006525 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6526 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006527#endif
6528
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006529 if (*ppos & (PAGE_SIZE - 1))
6530 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08006531
6532 if (len & (PAGE_SIZE - 1)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006533 if (len < PAGE_SIZE)
6534 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08006535 len &= PAGE_MASK;
6536 }
6537
Al Viro1ae22932016-09-17 18:31:46 -04006538 if (splice_grow_spd(pipe, &spd))
6539 return -ENOMEM;
6540
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006541 again:
6542 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006543 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04006544
Al Viroa786c062014-04-11 12:01:03 -04006545 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05006546 struct page *page;
6547 int r;
6548
6549 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01006550 if (!ref) {
6551 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006552 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01006553 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006554
Steven Rostedt7267fa62009-04-29 00:16:21 -04006555 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006556 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006557 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006558 if (!ref->page) {
Rabin Vincent07906da2014-11-06 22:26:07 +01006559 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006560 kfree(ref);
6561 break;
6562 }
6563
6564 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006565 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006566 if (r < 0) {
Vaibhav Nagarnaik7ea59062011-05-03 17:56:42 -07006567 ring_buffer_free_read_page(ref->buffer, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006568 kfree(ref);
6569 break;
6570 }
6571
6572 /*
6573 * Zero out any leftover data; this is going to
6574 * user land.
6575 */
6576 size = ring_buffer_page_len(ref->page);
6577 if (size < PAGE_SIZE)
6578 memset(ref->page + size, 0, PAGE_SIZE - size);
6579
6580 page = virt_to_page(ref->page);
6581
6582 spd.pages[i] = page;
6583 spd.partial[i].len = PAGE_SIZE;
6584 spd.partial[i].offset = 0;
6585 spd.partial[i].private = (unsigned long)ref;
6586 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08006587 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04006588
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006589 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006590 }
6591
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006592 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006593 spd.nr_pages = i;
6594
6595 /* did we read anything? */
6596 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01006597 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04006598 goto out;
Rabin Vincent07906da2014-11-06 22:26:07 +01006599
Al Viro1ae22932016-09-17 18:31:46 -04006600 ret = -EAGAIN;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006601 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
Al Viro1ae22932016-09-17 18:31:46 -04006602 goto out;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006603
Rabin Vincente30f53a2014-11-10 19:46:34 +01006604 ret = wait_on_pipe(iter, true);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006605 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04006606 goto out;
Rabin Vincente30f53a2014-11-10 19:46:34 +01006607
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006608 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006609 }
6610
6611 ret = splice_to_pipe(pipe, &spd);
Al Viro1ae22932016-09-17 18:31:46 -04006612out:
Eric Dumazet047fe362012-06-12 15:24:40 +02006613 splice_shrink_spd(&spd);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006614
Steven Rostedt2cadf912008-12-01 22:20:19 -05006615 return ret;
6616}
6617
6618static const struct file_operations tracing_buffers_fops = {
6619 .open = tracing_buffers_open,
6620 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006621 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006622 .release = tracing_buffers_release,
6623 .splice_read = tracing_buffers_splice_read,
6624 .llseek = no_llseek,
6625};
6626
Steven Rostedtc8d77182009-04-29 18:03:45 -04006627static ssize_t
6628tracing_stats_read(struct file *filp, char __user *ubuf,
6629 size_t count, loff_t *ppos)
6630{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006631 struct inode *inode = file_inode(filp);
6632 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006633 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006634 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006635 struct trace_seq *s;
6636 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006637 unsigned long long t;
6638 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04006639
Li Zefane4f2d102009-06-15 10:57:28 +08006640 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006641 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01006642 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04006643
6644 trace_seq_init(s);
6645
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006646 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006647 trace_seq_printf(s, "entries: %ld\n", cnt);
6648
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006649 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006650 trace_seq_printf(s, "overrun: %ld\n", cnt);
6651
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006652 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006653 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
6654
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006655 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006656 trace_seq_printf(s, "bytes: %ld\n", cnt);
6657
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09006658 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006659 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006660 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006661 usec_rem = do_div(t, USEC_PER_SEC);
6662 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
6663 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006664
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006665 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006666 usec_rem = do_div(t, USEC_PER_SEC);
6667 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
6668 } else {
6669 /* counter or tsc mode for trace_clock */
6670 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006671 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006672
6673 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006674 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006675 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006676
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006677 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07006678 trace_seq_printf(s, "dropped events: %ld\n", cnt);
6679
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006680 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05006681 trace_seq_printf(s, "read events: %ld\n", cnt);
6682
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006683 count = simple_read_from_buffer(ubuf, count, ppos,
6684 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04006685
6686 kfree(s);
6687
6688 return count;
6689}
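/*
 * Sample output sketch for per_cpu/cpuN/stats, with the fields printed above
 * (the values shown are illustrative):
 *
 *   entries: 129
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 5252
 *   oldest event ts:  2296.479804
 *   now ts:  2314.391492
 *   dropped events: 0
 *   read events: 82
 */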
6690
6691static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006692 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04006693 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006694 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006695 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04006696};
6697
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006698#ifdef CONFIG_DYNAMIC_FTRACE
6699
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006700int __weak ftrace_arch_read_dyn_info(char *buf, int size)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006701{
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006702 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006703}
6704
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006705static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006706tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006707 size_t cnt, loff_t *ppos)
6708{
Steven Rostedta26a2a22008-10-31 00:03:22 -04006709 static char ftrace_dyn_info_buffer[1024];
6710 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006711 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006712 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04006713 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006714 int r;
6715
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006716 mutex_lock(&dyn_info_mutex);
6717 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006718
Steven Rostedta26a2a22008-10-31 00:03:22 -04006719 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006720 buf[r++] = '\n';
6721
6722 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6723
6724 mutex_unlock(&dyn_info_mutex);
6725
6726 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006727}
6728
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006729static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006730 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006731 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006732 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006733};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006734#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006735
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006736#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6737static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04006738ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04006739 struct trace_array *tr, struct ftrace_probe_ops *ops,
6740 void **data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006741{
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006742 tracing_snapshot();
6743}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006744
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006745static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04006746ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04006747 struct trace_array *tr, struct ftrace_probe_ops *ops,
6748 void **data)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006749{
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006750 struct ftrace_func_mapper *mapper = ops->private_data;
6751 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006752
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006753 if (mapper)
6754 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006755
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006756 if (count) {
6757
6758 if (*count <= 0)
6759 return;
6760
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006761 (*count)--;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006762 }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006763
6764 tracing_snapshot();
6765}
6766
6767static int
6768ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
6769 struct ftrace_probe_ops *ops, void *data)
6770{
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006771 struct ftrace_func_mapper *mapper = ops->private_data;
6772 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006773
6774 seq_printf(m, "%ps:", (void *)ip);
6775
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01006776 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006777
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006778 if (mapper)
6779 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
6780
6781 if (count)
6782 seq_printf(m, ":count=%ld\n", *count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006783 else
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006784 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006785
6786 return 0;
6787}
6788
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006789static int
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04006790ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
6791 unsigned long ip, void *data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006792{
6793 struct ftrace_func_mapper *mapper = ops->private_data;
6794
Steven Rostedt (VMware)1a48df02017-04-04 10:27:51 -04006795 return ftrace_func_mapper_add_ip(mapper, ip, data);
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006796}
6797
6798static void
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04006799ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
6800 unsigned long ip, void **data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006801{
6802 struct ftrace_func_mapper *mapper = ops->private_data;
6803
6804 ftrace_func_mapper_remove_ip(mapper, ip);
6805}
6806
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006807static struct ftrace_probe_ops snapshot_probe_ops = {
6808 .func = ftrace_snapshot,
6809 .print = ftrace_snapshot_print,
6810};
6811
6812static struct ftrace_probe_ops snapshot_count_probe_ops = {
6813 .func = ftrace_count_snapshot,
6814 .print = ftrace_snapshot_print,
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006815 .init = ftrace_snapshot_init,
6816 .free = ftrace_snapshot_free,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006817};
6818
6819static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04006820ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006821 char *glob, char *cmd, char *param, int enable)
6822{
6823 struct ftrace_probe_ops *ops;
6824 void *count = (void *)-1;
6825 char *number;
6826 int ret;
6827
6828 /* hash funcs only work with set_ftrace_filter */
6829 if (!enable)
6830 return -EINVAL;
6831
6832 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6833
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04006834 if (glob[0] == '!')
6835 return unregister_ftrace_function_probe_func(glob+1, ops);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006836
6837 if (!param)
6838 goto out_reg;
6839
6840 number = strsep(&param, ":");
6841
6842 if (!strlen(number))
6843 goto out_reg;
6844
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006845 if (!ops->private_data) {
6846 ops->private_data = allocate_ftrace_func_mapper();
6847 if (!ops->private_data)
6848 return -ENOMEM;
6849 }
6850
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006851 /*
6852 * We use the callback data field (which is a pointer)
6853 * as our counter.
6854 */
6855 ret = kstrtoul(number, 0, (unsigned long *)&count);
6856 if (ret)
6857 return ret;
6858
6859 out_reg:
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04006860 ret = register_ftrace_function_probe(glob, tr, ops, count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006861
6862 if (ret >= 0)
6863 alloc_snapshot(&global_trace);
6864
6865 return ret < 0 ? ret : 0;
6866}
6867
6868static struct ftrace_func_command ftrace_snapshot_cmd = {
6869 .name = "snapshot",
6870 .func = ftrace_trace_snapshot_callback,
6871};
6872
Tom Zanussi38de93a2013-10-24 08:34:18 -05006873static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006874{
6875 return register_ftrace_command(&ftrace_snapshot_cmd);
6876}
6877#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05006878static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006879#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
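/*
 * Usage sketch for the "snapshot" function command parsed above (paths
 * assume the usual tracefs mount; schedule() is just an example target):
 *
 *   echo 'schedule:snapshot' > /sys/kernel/debug/tracing/set_ftrace_filter
 *   echo 'schedule:snapshot:5' > /sys/kernel/debug/tracing/set_ftrace_filter
 *   echo '!schedule:snapshot' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * The first form snapshots on every hit of schedule(), the second limits it
 * to five snapshots, and the '!' form unregisters the probe.
 */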
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006880
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006881static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006882{
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006883 if (WARN_ON(!tr->dir))
6884 return ERR_PTR(-ENODEV);
6885
6886 /* Top directory uses NULL as the parent */
6887 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6888 return NULL;
6889
6890 /* All sub buffers have a descriptor */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006891 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006892}
6893
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006894static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6895{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006896 struct dentry *d_tracer;
6897
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006898 if (tr->percpu_dir)
6899 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006900
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006901 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05006902 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006903 return NULL;
6904
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006905 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006906
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006907 WARN_ONCE(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006908 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006909
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006910 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006911}
6912
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006913static struct dentry *
6914trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6915 void *data, long cpu, const struct file_operations *fops)
6916{
6917 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6918
6919 if (ret) /* See tracing_get_cpu() */
David Howells7682c912015-03-17 22:26:16 +00006920 d_inode(ret)->i_cdev = (void *)(cpu + 1);
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006921 return ret;
6922}
6923
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006924static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006925tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006926{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006927 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006928 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04006929 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006930
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09006931 if (!d_percpu)
6932 return;
6933
Steven Rostedtdd49a382010-10-20 21:51:26 -04006934 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006935 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01006936 if (!d_cpu) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07006937 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01006938 return;
6939 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006940
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01006941 /* per cpu trace_pipe */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006942 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02006943 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006944
6945 /* per cpu trace */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006946 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006947 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04006948
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006949 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006950 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006951
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006952 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006953 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006954
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006955 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006956 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006957
6958#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006959 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006960 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006961
Oleg Nesterov649e9c702013-07-23 17:25:54 +02006962 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006963 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006964#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006965}
6966
Steven Rostedt60a11772008-05-12 21:20:44 +02006967#ifdef CONFIG_FTRACE_SELFTEST
6968/* Let selftest have access to static functions in this file */
6969#include "trace_selftest.c"
6970#endif
6971
Steven Rostedt577b7852009-02-26 23:43:05 -05006972static ssize_t
6973trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6974 loff_t *ppos)
6975{
6976 struct trace_option_dentry *topt = filp->private_data;
6977 char *buf;
6978
6979 if (topt->flags->val & topt->opt->bit)
6980 buf = "1\n";
6981 else
6982 buf = "0\n";
6983
6984 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6985}
6986
6987static ssize_t
6988trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6989 loff_t *ppos)
6990{
6991 struct trace_option_dentry *topt = filp->private_data;
6992 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05006993 int ret;
6994
Peter Huewe22fe9b52011-06-07 21:58:27 +02006995 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6996 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05006997 return ret;
6998
Li Zefan8d18eaa2009-12-08 11:17:06 +08006999 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05007000 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08007001
7002 if (!!(topt->flags->val & topt->opt->bit) != val) {
7003 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05007004 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05007005 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08007006 mutex_unlock(&trace_types_lock);
7007 if (ret)
7008 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05007009 }
7010
7011 *ppos += cnt;
7012
7013 return cnt;
7014}
7015
7016
7017static const struct file_operations trace_options_fops = {
7018 .open = tracing_open_generic,
7019 .read = trace_options_read,
7020 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007021 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05007022};
7023
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007024/*
7025 * In order to pass in both the trace_array descriptor and the index
7026 * to the flag that the trace option file represents, the trace_array
7027 * has a character array of trace_flags_index[], which holds the index
7028 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7029 * The address of this character array is passed to the flag option file
7030 * read/write callbacks.
7031 *
7032 * In order to extract both the index and the trace_array descriptor,
7033 * get_tr_index() uses the following algorithm.
7034 *
7035 * idx = *ptr;
7036 *
7037 * This works because the pointer itself holds the address of the index
7038 * (remember index[1] == 1).
7039 *
7040 * Then, to get the trace_array descriptor, we subtract that index
7041 * from the ptr, which puts us at the start of the index array itself.
7042 *
7043 * ptr - idx == &index[0]
7044 *
7045 * Then a simple container_of() from that pointer gets us to the
7046 * trace_array descriptor.
7047 */
7048static void get_tr_index(void *data, struct trace_array **ptr,
7049 unsigned int *pindex)
7050{
7051 *pindex = *(unsigned char *)data;
7052
7053 *ptr = container_of(data - *pindex, struct trace_array,
7054 trace_flags_index);
7055}
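/*
 * Worked example (a sketch, not live kernel code) of the pointer arithmetic
 * above, for some trace_array 'tr' and flag bit 3:
 *
 *	void *data = &tr->trace_flags_index[3];	    // stored as file private_data
 *	unsigned int idx = *(unsigned char *)data;  // == 3, since index[3] == 3
 *	struct trace_array *p = container_of(data - idx,
 *					     struct trace_array,
 *					     trace_flags_index);
 *	// p == tr: subtracting idx lands on &tr->trace_flags_index[0], and
 *	// container_of() walks back to the enclosing trace_array.
 */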
7056
Steven Rostedta8259072009-02-26 22:19:12 -05007057static ssize_t
7058trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7059 loff_t *ppos)
7060{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007061 void *tr_index = filp->private_data;
7062 struct trace_array *tr;
7063 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05007064 char *buf;
7065
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007066 get_tr_index(tr_index, &tr, &index);
7067
7068 if (tr->trace_flags & (1 << index))
Steven Rostedta8259072009-02-26 22:19:12 -05007069 buf = "1\n";
7070 else
7071 buf = "0\n";
7072
7073 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7074}
7075
7076static ssize_t
7077trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7078 loff_t *ppos)
7079{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007080 void *tr_index = filp->private_data;
7081 struct trace_array *tr;
7082 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05007083 unsigned long val;
7084 int ret;
7085
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007086 get_tr_index(tr_index, &tr, &index);
7087
Peter Huewe22fe9b52011-06-07 21:58:27 +02007088 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7089 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05007090 return ret;
7091
Zhaoleif2d84b62009-08-07 18:55:48 +08007092 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05007093 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04007094
7095 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007096 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04007097 mutex_unlock(&trace_types_lock);
Steven Rostedta8259072009-02-26 22:19:12 -05007098
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04007099 if (ret < 0)
7100 return ret;
7101
Steven Rostedta8259072009-02-26 22:19:12 -05007102 *ppos += cnt;
7103
7104 return cnt;
7105}
7106
Steven Rostedta8259072009-02-26 22:19:12 -05007107static const struct file_operations trace_options_core_fops = {
7108 .open = tracing_open_generic,
7109 .read = trace_options_core_read,
7110 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007111 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05007112};
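/*
 * Usage sketch: these fops back the per-flag files under "options/"
 * (print-parent is one example flag; paths assume the usual tracefs mount):
 *
 *   echo 1 > /sys/kernel/debug/tracing/options/print-parent
 *   cat /sys/kernel/debug/tracing/options/print-parent
 */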
7113
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007114struct dentry *trace_create_file(const char *name,
Al Virof4ae40a62011-07-24 04:33:43 -04007115 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007116 struct dentry *parent,
7117 void *data,
7118 const struct file_operations *fops)
7119{
7120 struct dentry *ret;
7121
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007122 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007123 if (!ret)
Joe Perchesa395d6a2016-03-22 14:28:09 -07007124 pr_warn("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007125
7126 return ret;
7127}
7128
7129
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007130static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05007131{
7132 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05007133
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007134 if (tr->options)
7135 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05007136
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007137 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05007138 if (IS_ERR(d_tracer))
Steven Rostedta8259072009-02-26 22:19:12 -05007139 return NULL;
7140
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007141 tr->options = tracefs_create_dir("options", d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007142 if (!tr->options) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07007143 pr_warn("Could not create tracefs directory 'options'\n");
Steven Rostedta8259072009-02-26 22:19:12 -05007144 return NULL;
7145 }
7146
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007147 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05007148}
7149
Steven Rostedt577b7852009-02-26 23:43:05 -05007150static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007151create_trace_option_file(struct trace_array *tr,
7152 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05007153 struct tracer_flags *flags,
7154 struct tracer_opt *opt)
7155{
7156 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05007157
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007158 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05007159 if (!t_options)
7160 return;
7161
7162 topt->flags = flags;
7163 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007164 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05007165
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007166 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05007167 &trace_options_fops);
7168
Steven Rostedt577b7852009-02-26 23:43:05 -05007169}
7170
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007171static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007172create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05007173{
7174 struct trace_option_dentry *topts;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007175 struct trace_options *tr_topts;
Steven Rostedt577b7852009-02-26 23:43:05 -05007176 struct tracer_flags *flags;
7177 struct tracer_opt *opts;
7178 int cnt;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007179 int i;
Steven Rostedt577b7852009-02-26 23:43:05 -05007180
7181 if (!tracer)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007182 return;
Steven Rostedt577b7852009-02-26 23:43:05 -05007183
7184 flags = tracer->flags;
7185
7186 if (!flags || !flags->opts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007187 return;
7188
7189 /*
7190 * If this is an instance, only create flags for tracers
7191 * the instance may have.
7192 */
7193 if (!trace_ok_for_array(tracer, tr))
7194 return;
7195
7196 for (i = 0; i < tr->nr_topts; i++) {
Chunyu Hud39cdd22016-03-08 21:37:01 +08007197 /* Make sure there are no duplicate flags. */
7198 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007199 return;
7200 }
Steven Rostedt577b7852009-02-26 23:43:05 -05007201
7202 opts = flags->opts;
7203
7204 for (cnt = 0; opts[cnt].name; cnt++)
7205 ;
7206
Steven Rostedt0cfe8242009-02-27 10:51:10 -05007207 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05007208 if (!topts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007209 return;
7210
7211 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7212 GFP_KERNEL);
7213 if (!tr_topts) {
7214 kfree(topts);
7215 return;
7216 }
7217
7218 tr->topts = tr_topts;
7219 tr->topts[tr->nr_topts].tracer = tracer;
7220 tr->topts[tr->nr_topts].topts = topts;
7221 tr->nr_topts++;
Steven Rostedt577b7852009-02-26 23:43:05 -05007222
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04007223 for (cnt = 0; opts[cnt].name; cnt++) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007224 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05007225 &opts[cnt]);
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04007226 WARN_ONCE(topts[cnt].entry == NULL,
7227 "Failed to create trace option: %s",
7228 opts[cnt].name);
7229 }
Steven Rostedt577b7852009-02-26 23:43:05 -05007230}
7231
Steven Rostedta8259072009-02-26 22:19:12 -05007232static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007233create_trace_option_core_file(struct trace_array *tr,
7234 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05007235{
7236 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05007237
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007238 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05007239 if (!t_options)
7240 return NULL;
7241
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007242 return trace_create_file(option, 0644, t_options,
7243 (void *)&tr->trace_flags_index[index],
7244 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05007245}
7246
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007247static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05007248{
7249 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007250 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05007251 int i;
7252
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007253 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05007254 if (!t_options)
7255 return;
7256
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007257 for (i = 0; trace_options[i]; i++) {
7258 if (top_level ||
7259 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7260 create_trace_option_core_file(tr, trace_options[i], i);
7261 }
Steven Rostedta8259072009-02-26 22:19:12 -05007262}
7263
Steven Rostedt499e5472012-02-22 15:50:28 -05007264static ssize_t
7265rb_simple_read(struct file *filp, char __user *ubuf,
7266 size_t cnt, loff_t *ppos)
7267{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04007268 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05007269 char buf[64];
7270 int r;
7271
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04007272 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05007273 r = sprintf(buf, "%d\n", r);
7274
7275 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7276}
7277
7278static ssize_t
7279rb_simple_write(struct file *filp, const char __user *ubuf,
7280 size_t cnt, loff_t *ppos)
7281{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04007282 struct trace_array *tr = filp->private_data;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007283 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05007284 unsigned long val;
7285 int ret;
7286
7287 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7288 if (ret)
7289 return ret;
7290
7291 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05007292 mutex_lock(&trace_types_lock);
7293 if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04007294 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007295 if (tr->current_trace->start)
7296 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05007297 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04007298 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007299 if (tr->current_trace->stop)
7300 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05007301 }
7302 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05007303 }
7304
7305 (*ppos)++;
7306
7307 return cnt;
7308}
7309
7310static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007311 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05007312 .read = rb_simple_read,
7313 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007314 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05007315 .llseek = default_llseek,
7316};
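/*
 * Usage sketch: rb_simple_fops backs the per-instance "tracing_on" file
 * (the association is made in init_tracer_tracefs(), outside this excerpt):
 *
 *   echo 0 > /sys/kernel/debug/tracing/tracing_on   # stop recording
 *   echo 1 > /sys/kernel/debug/tracing/tracing_on   # resume recording
 */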
7317
Steven Rostedt277ba042012-08-03 16:10:49 -04007318struct dentry *trace_instance_dir;
7319
7320static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007321init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
Steven Rostedt277ba042012-08-03 16:10:49 -04007322
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007323static int
7324allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04007325{
7326 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007327
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007328 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007329
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05007330 buf->tr = tr;
7331
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007332 buf->buffer = ring_buffer_alloc(size, rb_flags);
7333 if (!buf->buffer)
7334 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007335
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007336 buf->data = alloc_percpu(struct trace_array_cpu);
7337 if (!buf->data) {
7338 ring_buffer_free(buf->buffer);
7339 return -ENOMEM;
7340 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007341
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007342 /* Allocate the first page for all buffers */
7343 set_buffer_entries(&tr->trace_buffer,
7344 ring_buffer_size(tr->trace_buffer.buffer, 0));
7345
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007346 return 0;
7347}
7348
7349static int allocate_trace_buffers(struct trace_array *tr, int size)
7350{
7351 int ret;
7352
7353 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7354 if (ret)
7355 return ret;
7356
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007357#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007358 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7359 allocate_snapshot ? size : 1);
7360 if (WARN_ON(ret)) {
7361 ring_buffer_free(tr->trace_buffer.buffer);
7362 free_percpu(tr->trace_buffer.data);
7363 return -ENOMEM;
7364 }
7365 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007366
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007367 /*
7368 * Only the top level trace array gets its snapshot allocated
7369 * from the kernel command line.
7370 */
7371 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007372#endif
7373 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007374}
7375
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04007376static void free_trace_buffer(struct trace_buffer *buf)
7377{
7378 if (buf->buffer) {
7379 ring_buffer_free(buf->buffer);
7380 buf->buffer = NULL;
7381 free_percpu(buf->data);
7382 buf->data = NULL;
7383 }
7384}
7385
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007386static void free_trace_buffers(struct trace_array *tr)
7387{
7388 if (!tr)
7389 return;
7390
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04007391 free_trace_buffer(&tr->trace_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007392
7393#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04007394 free_trace_buffer(&tr->max_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007395#endif
7396}
7397
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007398static void init_trace_flags_index(struct trace_array *tr)
7399{
7400 int i;
7401
7402 /* Used by the trace options files */
7403 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7404 tr->trace_flags_index[i] = i;
7405}
7406
static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}

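/*
 * Back end of mkdir in the tracefs "instances" directory (see
 * create_trace_instances() below): allocate a new trace_array
 * with its own buffers, events and option files.
 */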
static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	ftrace_init_trace_array(tr);

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

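/*
 * Counterpart of instance_mkdir(): tear an instance down,
 * refusing with -EBUSY while the trace array or its current
 * tracer is still referenced.
 */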
static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;
	int i;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	/* Disable all the flags that were enabled coming in */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
		if ((1 << i) & ZEROED_TRACE_FLAGS)
			set_tracer_flag(tr, 1 << i, 0);
	}

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}

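/*
 * Create the control files of a trace array (trace, trace_pipe,
 * buffer_size_kb, ...). Used for both the top level tracing
 * directory and each instance directory.
 */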
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_marker_raw", 0220, d_tracer,
			  tr, &tracing_mark_raw_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}

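/*
 * Automount callback for the debugfs "tracing" mount point set up
 * in tracing_init_dentry() below; returns a tracefs submount or
 * NULL on failure.
 */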
static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!tracefs_initialized()) ||
		(IS_ENABLED(CONFIG_DEBUG_FS) &&
		 WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return NULL;
}

extern struct trace_enum_map *__start_ftrace_enum_maps[];
extern struct trace_enum_map *__stop_ftrace_enum_maps[];

static void __init trace_enum_init(void)
{
	int len;

	len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
	trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
}

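/*
 * Modules can carry their own enum maps. Pick them up when a
 * module is loaded and, when CONFIG_TRACE_ENUM_MAP_FILE keeps the
 * maps around, drop them again on unload.
 */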
#ifdef CONFIG_MODULES
static void trace_module_add_enums(struct module *mod)
{
	if (!mod->num_trace_enums)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
}

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
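/*
 * The saved enum maps of all modules live on one list whose
 * per-module sections are delimited by head/tail items; skip from
 * section to section with trace_enum_jmp_to_tail() until the
 * section belonging to @mod is found, then unlink and free it.
 */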
static void trace_module_remove_enums(struct module *mod)
{
	union trace_enum_map_item *map;
	union trace_enum_map_item **last = &trace_enum_maps;

	if (!mod->num_trace_enums)
		return;

	mutex_lock(&trace_enum_mutex);

	map = trace_enum_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_enum_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_enum_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_enum_mutex);
}
#else
static inline void trace_module_remove_enums(struct module *mod) { }
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_enums(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_enums(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

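/*
 * Populate the top level tracing directory. Runs as an
 * fs_initcall() (see the bottom of this file) once tracefs is
 * available.
 */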
static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	init_tracer_tracefs(&global_trace, d_tracer);
	ftrace_init_tracefs_toplevel(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_enum_init();

	trace_create_enum_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is set to a max of 1024; we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT 1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* Should be zero terminated, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

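/**
 * ftrace_dump - print the contents of the ring buffers to the console
 * @oops_dump_mode: DUMP_ALL dumps every CPU, DUMP_ORIG only the CPU
 *	that triggered the dump, DUMP_NONE does nothing.
 *
 * Meant for the panic/oops path (and sysrq-z): tracing is turned off,
 * the buffers are printed with printk at KERN_TRACE level, and tracing
 * is left disabled afterwards.
 */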
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("# MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE " (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

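/*
 * Set up the global trace array: allocate its cpumasks and ring
 * buffers, register the nop tracer and hook up the panic and die
 * notifiers. Called during early boot from early_trace_init().
 */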
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocate some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

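/*
 * Called early in boot, well before tracefs is set up, so that the
 * ring buffers exist by the time tracing (including a command line
 * selected tracer) starts.
 */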
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}

void __init trace_init(void)
{
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer string lives in an init section.
	 * This function is called via late_initcall(); if we did not
	 * find the boot tracer, clear the pointer out to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall(clear_boot_tracer);