blob: 66b0714f2779dac3ffe3469895575dc72cda9628 [file] [log] [blame]
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001/*
2 * ring buffer based function tracer
3 *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +010012 * Copyright (C) 2004 Nadia Yvette Chambers
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020013 */
Steven Rostedt2cadf912008-12-01 22:20:19 -050014#include <linux/ring_buffer.h>
Sam Ravnborg273b2812009-10-18 00:52:28 +020015#include <generated/utsrelease.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050016#include <linux/stacktrace.h>
17#include <linux/writeback.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020018#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040020#include <linux/notifier.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050021#include <linux/irqflags.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020022#include <linux/debugfs.h>
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -050023#include <linux/tracefs.h>
Steven Rostedt4c11d7a2008-05-12 21:20:43 +020024#include <linux/pagemap.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020025#include <linux/hardirq.h>
26#include <linux/linkage.h>
27#include <linux/uaccess.h>
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -040028#include <linux/vmalloc.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020029#include <linux/ftrace.h>
30#include <linux/module.h>
31#include <linux/percpu.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050032#include <linux/splice.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040033#include <linux/kdebug.h>
Frederic Weisbecker5f0c6c02009-03-27 14:22:10 +010034#include <linux/string.h>
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -050035#include <linux/mount.h>
Lai Jiangshan7e53bd42010-01-06 20:08:50 +080036#include <linux/rwsem.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090037#include <linux/slab.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020038#include <linux/ctype.h>
39#include <linux/init.h>
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +020040#include <linux/poll.h>
Steven Rostedtb892e5c2012-03-01 22:06:48 -050041#include <linux/nmi.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020042#include <linux/fs.h>
Clark Williams8bd75c72013-02-07 09:47:07 -060043#include <linux/sched/rt.h>
Shashank Mittal43beb422016-05-20 13:06:09 -070044#include <linux/coresight-stm.h>
Ingo Molnar86387f72008-05-12 21:20:51 +020045
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020046#include "trace.h"
Steven Rostedtf0868d12008-12-23 23:24:12 -050047#include "trace_output.h"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020048
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010049/*
Steven Rostedt73c51622009-03-11 13:42:01 -040050 * On boot up, the ring buffer is set to the minimum size, so that
51 * we do not waste memory on systems that are not using tracing.
52 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -050053bool ring_buffer_expanded;
Steven Rostedt73c51622009-03-11 13:42:01 -040054
55/*
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010056 * We need to change this state when a selftest is running.
Frederic Weisbeckerff325042008-12-04 23:47:35 +010057 * A selftest will lurk into the ring-buffer to count the
58 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occur
Frederic Weisbeckerff325042008-12-04 23:47:35 +010060 * at the same time, giving false positive or negative results.
61 */
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010062static bool __read_mostly tracing_selftest_running;
Frederic Weisbeckerff325042008-12-04 23:47:35 +010063
Steven Rostedtb2821ae2009-02-02 21:38:32 -050064/*
65 * If a tracer is running, we do not want to run SELFTEST.
66 */
Li Zefan020e5f82009-07-01 10:47:05 +080067bool __read_mostly tracing_selftest_disabled;
Steven Rostedtb2821ae2009-02-02 21:38:32 -050068
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -050069/* Pipe tracepoints to printk */
70struct trace_iterator *tracepoint_print_iter;
71int tracepoint_printk;
72
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +010073/* For tracers that don't implement custom flags */
74static struct tracer_opt dummy_tracer_opt[] = {
75 { }
76};
77
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -050078static int
79dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +010080{
81 return 0;
82}
Steven Rostedt0f048702008-11-05 16:05:44 -050083
84/*
Steven Rostedt7ffbd482012-10-11 12:14:25 -040085 * To prevent the comm cache from being overwritten when no
86 * tracing is active, only save the comm when a trace event
87 * occurred.
88 */
89static DEFINE_PER_CPU(bool, trace_cmdline_save);
90
91/*
Steven Rostedt0f048702008-11-05 16:05:44 -050092 * Kill all tracing for good (never come back).
93 * It is initialized to 1 but will turn to zero if the initialization
94 * of the tracer is successful. But that is the only place that sets
95 * this back to zero.
96 */
Hannes Eder4fd27352009-02-10 19:44:12 +010097static int tracing_disabled = 1;
Steven Rostedt0f048702008-11-05 16:05:44 -050098
Jason Wessel955b61e2010-08-05 09:22:23 -050099cpumask_var_t __read_mostly tracing_buffer_mask;
Steven Rostedtab464282008-05-12 21:21:00 +0200100
Steven Rostedt944ac422008-10-23 19:26:08 -0400101/*
102 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
103 *
104 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
105 * is set, then ftrace_dump is called. This will output the contents
106 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting it to a
108 * serial console.
109 *
110 * It is default off, but you can enable it with either specifying
111 * "ftrace_dump_on_oops" in the kernel command line, or setting
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200112 * /proc/sys/kernel/ftrace_dump_on_oops
113 * Set 1 if you want to dump buffers of all CPUs
114 * Set 2 if you want to dump the buffer of the CPU that triggered oops
Steven Rostedt944ac422008-10-23 19:26:08 -0400115 */
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200116
117enum ftrace_dump_mode ftrace_dump_on_oops;
Steven Rostedt944ac422008-10-23 19:26:08 -0400118
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400119/* When set, tracing will stop when a WARN*() is hit */
120int __disable_trace_on_warning;
121
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400122#ifdef CONFIG_TRACE_ENUM_MAP_FILE
123/* Map of enums to their values, for "enum_map" file */
124struct trace_enum_map_head {
125 struct module *mod;
126 unsigned long length;
127};
128
129union trace_enum_map_item;
130
131struct trace_enum_map_tail {
132 /*
133 * "end" is first and points to NULL as it must be different
134 * than "mod" or "enum_string"
135 */
136 union trace_enum_map_item *next;
137 const char *end; /* points to NULL */
138};
139
140static DEFINE_MUTEX(trace_enum_mutex);
141
142/*
143 * The trace_enum_maps are saved in an array with two extra elements,
144 * one at the beginning, and one at the end. The beginning item contains
145 * the count of the saved maps (head.length), and the module they
146 * belong to if not built in (head.mod). The ending item contains a
147 * pointer to the next array of saved enum_map items.
148 */
149union trace_enum_map_item {
150 struct trace_enum_map map;
151 struct trace_enum_map_head head;
152 struct trace_enum_map_tail tail;
153};
154
155static union trace_enum_map_item *trace_enum_maps;
156#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
157
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -0500158static int tracing_set_tracer(struct trace_array *tr, const char *buf);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500159
Li Zefanee6c2c12009-09-18 14:06:47 +0800160#define MAX_TRACER_SIZE 100
161static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500162static char *default_bootup_tracer;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100163
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500164static bool allocate_snapshot;
165
Frederic Weisbecker1beee962009-10-14 20:50:32 +0200166static int __init set_cmdline_ftrace(char *str)
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100167{
Chen Gang67012ab2013-04-08 12:06:44 +0800168 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500169 default_bootup_tracer = bootup_tracer_buf;
Steven Rostedt73c51622009-03-11 13:42:01 -0400170 /* We are using ftrace early, expand it */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500171 ring_buffer_expanded = true;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100172 return 1;
173}
Frederic Weisbecker1beee962009-10-14 20:50:32 +0200174__setup("ftrace=", set_cmdline_ftrace);
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100175
Steven Rostedt944ac422008-10-23 19:26:08 -0400176static int __init set_ftrace_dump_on_oops(char *str)
177{
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200178 if (*str++ != '=' || !*str) {
179 ftrace_dump_on_oops = DUMP_ALL;
180 return 1;
181 }
182
183 if (!strcmp("orig_cpu", str)) {
184 ftrace_dump_on_oops = DUMP_ORIG;
185 return 1;
186 }
187
188 return 0;
Steven Rostedt944ac422008-10-23 19:26:08 -0400189}
190__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
Steven Rostedt60a11772008-05-12 21:20:44 +0200191
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400192static int __init stop_trace_on_warning(char *str)
193{
Luis Claudio R. Goncalves933ff9f2014-11-12 21:14:00 -0200194 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
195 __disable_trace_on_warning = 1;
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400196 return 1;
197}
Luis Claudio R. Goncalves933ff9f2014-11-12 21:14:00 -0200198__setup("traceoff_on_warning", stop_trace_on_warning);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400199
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400200static int __init boot_alloc_snapshot(char *str)
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500201{
202 allocate_snapshot = true;
203 /* We also need the main ring buffer expanded */
204 ring_buffer_expanded = true;
205 return 1;
206}
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400207__setup("alloc_snapshot", boot_alloc_snapshot);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500208
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400209
210static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400211
212static int __init set_trace_boot_options(char *str)
213{
Chen Gang67012ab2013-04-08 12:06:44 +0800214 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400215 return 0;
216}
217__setup("trace_options=", set_trace_boot_options);
218
Steven Rostedte1e232c2014-02-10 23:38:46 -0500219static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
220static char *trace_boot_clock __initdata;
221
222static int __init set_trace_boot_clock(char *str)
223{
224 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
225 trace_boot_clock = trace_boot_clock_buf;
226 return 0;
227}
228__setup("trace_clock=", set_trace_boot_clock);
229
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -0500230static int __init set_tracepoint_printk(char *str)
231{
232 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
233 tracepoint_printk = 1;
234 return 1;
235}
236__setup("tp_printk", set_tracepoint_printk);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400237
Lai Jiangshancf8e3472009-03-30 13:48:00 +0800238unsigned long long ns2usecs(cycle_t nsec)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200239{
240 nsec += 500;
241 do_div(nsec, 1000);
242 return nsec;
243}
244
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400245/* trace_flags holds trace_options default values */
246#define TRACE_DEFAULT_FLAGS \
247 (FUNCTION_DEFAULT_FLAGS | \
248 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
249 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
250 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
251 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
252
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -0400253/* trace_options that are only supported by global_trace */
254#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
255 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
256
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -0400257/* trace_flags that are default zero for instances */
258#define ZEROED_TRACE_FLAGS \
259 TRACE_ITER_EVENT_FORK
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -0400260
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200261/*
262 * The global_trace is the descriptor that holds the tracing
263 * buffers for the live tracing. For each CPU, it contains
264 * a link list of pages that will store trace entries. The
265 * page descriptor of the pages in the memory is used to hold
266 * the link list by linking the lru item in the page descriptor
267 * to each of the pages in the buffer per CPU.
268 *
269 * For each active CPU there is a data field that holds the
270 * pages for the buffer for that CPU. Each CPU has the same number
271 * of pages allocated for its buffer.
272 */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400273static struct trace_array global_trace = {
274 .trace_flags = TRACE_DEFAULT_FLAGS,
275};
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200276
Steven Rostedtae63b312012-05-03 23:09:03 -0400277LIST_HEAD(ftrace_trace_arrays);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200278
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -0400279int trace_array_get(struct trace_array *this_tr)
280{
281 struct trace_array *tr;
282 int ret = -ENODEV;
283
284 mutex_lock(&trace_types_lock);
285 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
286 if (tr == this_tr) {
287 tr->ref++;
288 ret = 0;
289 break;
290 }
291 }
292 mutex_unlock(&trace_types_lock);
293
294 return ret;
295}
296
297static void __trace_array_put(struct trace_array *this_tr)
298{
299 WARN_ON(!this_tr->ref);
300 this_tr->ref--;
301}
302
303void trace_array_put(struct trace_array *this_tr)
304{
305 mutex_lock(&trace_types_lock);
306 __trace_array_put(this_tr);
307 mutex_unlock(&trace_types_lock);
308}
309
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -0400310int call_filter_check_discard(struct trace_event_call *call, void *rec,
Tom Zanussif306cc82013-10-24 08:34:17 -0500311 struct ring_buffer *buffer,
312 struct ring_buffer_event *event)
313{
314 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
315 !filter_match_preds(call->filter, rec)) {
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -0400316 __trace_event_discard_commit(buffer, event);
Tom Zanussif306cc82013-10-24 08:34:17 -0500317 return 1;
318 }
319
320 return 0;
321}
Tom Zanussieb02ce02009-04-08 03:15:54 -0500322
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400323void trace_free_pid_list(struct trace_pid_list *pid_list)
324{
325 vfree(pid_list->pids);
326 kfree(pid_list);
327}
328
Steven Rostedtd8275c42016-04-14 12:15:22 -0400329/**
330 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
331 * @filtered_pids: The list of pids to check
332 * @search_pid: The PID to find in @filtered_pids
333 *
334 * Returns true if @search_pid is fonud in @filtered_pids, and false otherwis.
335 */
336bool
337trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
338{
339 /*
340 * If pid_max changed after filtered_pids was created, we
341 * by default ignore all pids greater than the previous pid_max.
342 */
343 if (search_pid >= filtered_pids->pid_max)
344 return false;
345
346 return test_bit(search_pid, filtered_pids->pids);
347}
348
349/**
350 * trace_ignore_this_task - should a task be ignored for tracing
351 * @filtered_pids: The list of pids to check
352 * @task: The task that should be ignored if not filtered
353 *
354 * Checks if @task should be traced or not from @filtered_pids.
355 * Returns true if @task should *NOT* be traced.
356 * Returns false if @task should be traced.
357 */
358bool
359trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
360{
361 /*
362 * Return false, because if filtered_pids does not exist,
363 * all pids are good to trace.
364 */
365 if (!filtered_pids)
366 return false;
367
368 return !trace_find_filtered_pid(filtered_pids, task->pid);
369}
370
371/**
372 * trace_pid_filter_add_remove - Add or remove a task from a pid_list
373 * @pid_list: The list to modify
374 * @self: The current task for fork or NULL for exit
375 * @task: The task to add or remove
376 *
377 * If adding a task, if @self is defined, the task is only added if @self
378 * is also included in @pid_list. This happens on fork and tasks should
379 * only be added when the parent is listed. If @self is NULL, then the
380 * @task pid will be removed from the list, which would happen on exit
381 * of a task.
382 */
383void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
384 struct task_struct *self,
385 struct task_struct *task)
386{
387 if (!pid_list)
388 return;
389
390 /* For forks, we only add if the forking task is listed */
391 if (self) {
392 if (!trace_find_filtered_pid(pid_list, self->pid))
393 return;
394 }
395
396 /* Sorry, but we don't support pid_max changing after setting */
397 if (task->pid >= pid_list->pid_max)
398 return;
399
400 /* "self" is set for forks, and NULL for exits */
401 if (self)
402 set_bit(task->pid, pid_list->pids);
403 else
404 clear_bit(task->pid, pid_list->pids);
405}
406
Steven Rostedt (Red Hat)5cc89762016-04-20 15:19:54 -0400407/**
408 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
409 * @pid_list: The pid list to show
410 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
411 * @pos: The position of the file
412 *
413 * This is used by the seq_file "next" operation to iterate the pids
414 * listed in a trace_pid_list structure.
415 *
416 * Returns the pid+1 as we want to display pid of zero, but NULL would
417 * stop the iteration.
418 */
419void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
420{
421 unsigned long pid = (unsigned long)v;
422
423 (*pos)++;
424
425 /* pid already is +1 of the actual prevous bit */
426 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
427
428 /* Return pid + 1 to allow zero to be represented */
429 if (pid < pid_list->pid_max)
430 return (void *)(pid + 1);
431
432 return NULL;
433}
434
435/**
436 * trace_pid_start - Used for seq_file to start reading pid lists
437 * @pid_list: The pid list to show
438 * @pos: The position of the file
439 *
440 * This is used by seq_file "start" operation to start the iteration
441 * of listing pids.
442 *
443 * Returns the pid+1 as we want to display pid of zero, but NULL would
444 * stop the iteration.
445 */
446void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
447{
448 unsigned long pid;
449 loff_t l = 0;
450
451 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
452 if (pid >= pid_list->pid_max)
453 return NULL;
454
455 /* Return pid + 1 so that zero can be the exit value */
456 for (pid++; pid && l < *pos;
457 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
458 ;
459 return (void *)pid;
460}
461
/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	/* The iterator hands us pid+1; undo the offset before printing. */
	seq_printf(m, "%lu\n", (unsigned long)v - 1);
	return 0;
}
477
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400478/* 128 should be much more than enough */
479#define PID_BUF_SIZE 127
480
481int trace_pid_write(struct trace_pid_list *filtered_pids,
482 struct trace_pid_list **new_pid_list,
483 const char __user *ubuf, size_t cnt)
484{
485 struct trace_pid_list *pid_list;
486 struct trace_parser parser;
487 unsigned long val;
488 int nr_pids = 0;
489 ssize_t read = 0;
490 ssize_t ret = 0;
491 loff_t pos;
492 pid_t pid;
493
494 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
495 return -ENOMEM;
496
497 /*
498 * Always recreate a new array. The write is an all or nothing
499 * operation. Always create a new array when adding new pids by
500 * the user. If the operation fails, then the current list is
501 * not modified.
502 */
503 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
504 if (!pid_list)
505 return -ENOMEM;
506
507 pid_list->pid_max = READ_ONCE(pid_max);
508
509 /* Only truncating will shrink pid_max */
510 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
511 pid_list->pid_max = filtered_pids->pid_max;
512
513 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
514 if (!pid_list->pids) {
515 kfree(pid_list);
516 return -ENOMEM;
517 }
518
519 if (filtered_pids) {
520 /* copy the current bits to the new max */
Wei Yongjun67f20b02016-07-04 15:10:04 +0000521 for_each_set_bit(pid, filtered_pids->pids,
522 filtered_pids->pid_max) {
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400523 set_bit(pid, pid_list->pids);
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400524 nr_pids++;
525 }
526 }
527
528 while (cnt > 0) {
529
530 pos = 0;
531
532 ret = trace_get_user(&parser, ubuf, cnt, &pos);
533 if (ret < 0 || !trace_parser_loaded(&parser))
534 break;
535
536 read += ret;
537 ubuf += ret;
538 cnt -= ret;
539
540 parser.buffer[parser.idx] = 0;
541
542 ret = -EINVAL;
543 if (kstrtoul(parser.buffer, 0, &val))
544 break;
545 if (val >= pid_list->pid_max)
546 break;
547
548 pid = (pid_t)val;
549
550 set_bit(pid, pid_list->pids);
551 nr_pids++;
552
553 trace_parser_clear(&parser);
554 ret = 0;
555 }
556 trace_parser_put(&parser);
557
558 if (ret < 0) {
559 trace_free_pid_list(pid_list);
560 return ret;
561 }
562
563 if (!nr_pids) {
564 /* Cleared the list of pids */
565 trace_free_pid_list(pid_list);
566 read = ret;
567 pid_list = NULL;
568 }
569
570 *new_pid_list = pid_list;
571
572 return read;
573}
574
Fabian Frederickad1438a2014-04-17 21:44:42 +0200575static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
Steven Rostedt37886f62009-03-17 17:22:06 -0400576{
577 u64 ts;
578
579 /* Early boot up does not have a buffer yet */
Alexander Z Lam94571582013-08-02 18:36:16 -0700580 if (!buf->buffer)
Steven Rostedt37886f62009-03-17 17:22:06 -0400581 return trace_clock_local();
582
Alexander Z Lam94571582013-08-02 18:36:16 -0700583 ts = ring_buffer_time_stamp(buf->buffer, cpu);
584 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
Steven Rostedt37886f62009-03-17 17:22:06 -0400585
586 return ts;
587}
588
Alexander Z Lam94571582013-08-02 18:36:16 -0700589cycle_t ftrace_now(int cpu)
590{
591 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
592}
593
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400594/**
595 * tracing_is_enabled - Show if global_trace has been disabled
596 *
597 * Shows if the global trace has been enabled or not. It uses the
598 * mirror flag "buffer_disabled" to be used in fast paths such as for
599 * the irqsoff tracer. But it may be inaccurate due to races. If you
600 * need to know the accurate state, use tracing_is_on() which is a little
601 * slower, but accurate.
602 */
Steven Rostedt90369902008-11-05 16:05:44 -0500603int tracing_is_enabled(void)
604{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400605 /*
606 * For quick access (irqsoff uses this in fast path), just
607 * return the mirror variable of the state of the ring buffer.
608 * It's a little racy, but we don't really care.
609 */
610 smp_rmb();
611 return !global_trace.buffer_disabled;
Steven Rostedt90369902008-11-05 16:05:44 -0500612}
613
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200614/*
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400615 * trace_buf_size is the size in bytes that is allocated
616 * for a buffer. Note, the number of bytes is always rounded
617 * to page size.
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400618 *
619 * This number is purposely set to a low number of 16384.
620 * If the dump on oops happens, it will be much appreciated
621 * to not have to wait for all that output. Anyway this can be
622 * boot time and run time configurable.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200623 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400624#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400625
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400626static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200627
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200628/* trace_types holds a link list of available tracers. */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200629static struct tracer *trace_types __read_mostly;
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200630
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200631/*
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200632 * trace_types_lock is used to protect the trace_types list.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200633 */
Alexander Z Lama8227412013-07-01 19:37:54 -0700634DEFINE_MUTEX(trace_types_lock);
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200635
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800636/*
637 * serialize the access of the ring buffer
638 *
639 * ring buffer serializes readers, but it is low level protection.
640 * The validity of the events (which returns by ring_buffer_peek() ..etc)
641 * are not protected by ring buffer.
642 *
643 * The content of events may become garbage if we allow other process consumes
644 * these events concurrently:
645 * A) the page of the consumed events may become a normal page
646 * (not reader page) in ring buffer, and this page will be rewrited
647 * by events producer.
648 * B) The page of the consumed events may become a page for splice_read,
649 * and this page will be returned to system.
650 *
651 * These primitives allow multi process access to different cpu ring buffer
652 * concurrently.
653 *
654 * These primitives don't distinguish read-only and read-consume access.
655 * Multi read-only access are also serialized.
656 */
657
658#ifdef CONFIG_SMP
659static DECLARE_RWSEM(all_cpu_access_lock);
660static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
661
662static inline void trace_access_lock(int cpu)
663{
Steven Rostedtae3b5092013-01-23 15:22:59 -0500664 if (cpu == RING_BUFFER_ALL_CPUS) {
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800665 /* gain it for accessing the whole ring buffer. */
666 down_write(&all_cpu_access_lock);
667 } else {
668 /* gain it for accessing a cpu ring buffer. */
669
Steven Rostedtae3b5092013-01-23 15:22:59 -0500670 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800671 down_read(&all_cpu_access_lock);
672
673 /* Secondly block other access to this @cpu ring buffer. */
674 mutex_lock(&per_cpu(cpu_access_lock, cpu));
675 }
676}
677
678static inline void trace_access_unlock(int cpu)
679{
Steven Rostedtae3b5092013-01-23 15:22:59 -0500680 if (cpu == RING_BUFFER_ALL_CPUS) {
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800681 up_write(&all_cpu_access_lock);
682 } else {
683 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
684 up_read(&all_cpu_access_lock);
685 }
686}
687
688static inline void trace_access_lock_init(void)
689{
690 int cpu;
691
692 for_each_possible_cpu(cpu)
693 mutex_init(&per_cpu(cpu_access_lock, cpu));
694}
695
696#else
697
698static DEFINE_MUTEX(access_lock);
699
700static inline void trace_access_lock(int cpu)
701{
702 (void)cpu;
703 mutex_lock(&access_lock);
704}
705
706static inline void trace_access_unlock(int cpu)
707{
708 (void)cpu;
709 mutex_unlock(&access_lock);
710}
711
/* !SMP: nothing to initialize; the static mutex is ready to use. */
static inline void trace_access_lock_init(void)
{
}
715
716#endif
717
Steven Rostedt (Red Hat)d78a4612015-09-25 13:30:47 -0400718#ifdef CONFIG_STACKTRACE
719static void __ftrace_trace_stack(struct ring_buffer *buffer,
720 unsigned long flags,
721 int skip, int pc, struct pt_regs *regs);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -0400722static inline void ftrace_trace_stack(struct trace_array *tr,
723 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -0400724 unsigned long flags,
725 int skip, int pc, struct pt_regs *regs);
Steven Rostedt (Red Hat)ca475e82015-09-28 09:41:11 -0400726
Steven Rostedt (Red Hat)d78a4612015-09-25 13:30:47 -0400727#else
/* No-op stubs used when CONFIG_STACKTRACE is not enabled. */
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}
739
Steven Rostedt (Red Hat)d78a4612015-09-25 13:30:47 -0400740#endif
741
Steven Rostedt (Red Hat)5280bce2013-07-02 19:59:57 -0400742static void tracer_tracing_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400743{
744 if (tr->trace_buffer.buffer)
745 ring_buffer_record_on(tr->trace_buffer.buffer);
746 /*
747 * This flag is looked at when buffers haven't been allocated
748 * yet, or by some tracers (like irqsoff), that just want to
749 * know if the ring buffer has been disabled, but it can handle
750 * races of where it gets disabled but we still do a record.
751 * As the check is in the fast path of the tracers, it is more
752 * important to be fast than accurate.
753 */
754 tr->buffer_disabled = 0;
755 /* Make the flag seen by readers */
756 smp_wmb();
757}
758
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200759/**
Steven Rostedt499e5472012-02-22 15:50:28 -0500760 * tracing_on - enable tracing buffers
761 *
762 * This function enables tracing buffers that may have been
763 * disabled with tracing_off.
764 */
765void tracing_on(void)
766{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400767 tracer_tracing_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -0500768}
769EXPORT_SYMBOL_GPL(tracing_on);
770
771/**
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500772 * __trace_puts - write a constant string into the trace buffer.
773 * @ip: The address of the caller
774 * @str: The constant string to write
775 * @size: The size of the string.
776 */
777int __trace_puts(unsigned long ip, const char *str, int size)
778{
779 struct ring_buffer_event *event;
780 struct ring_buffer *buffer;
781 struct print_entry *entry;
782 unsigned long irq_flags;
783 int alloc;
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800784 int pc;
785
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400786 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
zhangwei(Jovi)f0160a52013-07-18 16:31:18 +0800787 return 0;
788
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800789 pc = preempt_count();
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500790
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -0500791 if (unlikely(tracing_selftest_running || tracing_disabled))
792 return 0;
793
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500794 alloc = sizeof(*entry) + size + 2; /* possible \n added */
795
796 local_save_flags(irq_flags);
797 buffer = global_trace.trace_buffer.buffer;
798 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800799 irq_flags, pc);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500800 if (!event)
801 return 0;
802
803 entry = ring_buffer_event_data(event);
804 entry->ip = ip;
805
806 memcpy(&entry->buf, str, size);
807
808 /* Add a newline if necessary */
809 if (entry->buf[size - 1] != '\n') {
810 entry->buf[size] = '\n';
811 entry->buf[size + 1] = '\0';
812 } else
813 entry->buf[size] = '\0';
814
815 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -0400816 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500817
818 return size;
819}
820EXPORT_SYMBOL_GPL(__trace_puts);
821
822/**
823 * __trace_bputs - write the pointer to a constant string into trace buffer
824 * @ip: The address of the caller
825 * @str: The constant string to write to the buffer to
826 */
827int __trace_bputs(unsigned long ip, const char *str)
828{
829 struct ring_buffer_event *event;
830 struct ring_buffer *buffer;
831 struct bputs_entry *entry;
832 unsigned long irq_flags;
833 int size = sizeof(struct bputs_entry);
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800834 int pc;
835
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400836 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
zhangwei(Jovi)f0160a52013-07-18 16:31:18 +0800837 return 0;
838
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800839 pc = preempt_count();
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500840
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -0500841 if (unlikely(tracing_selftest_running || tracing_disabled))
842 return 0;
843
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500844 local_save_flags(irq_flags);
845 buffer = global_trace.trace_buffer.buffer;
846 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800847 irq_flags, pc);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500848 if (!event)
849 return 0;
850
851 entry = ring_buffer_event_data(event);
852 entry->ip = ip;
853 entry->str = str;
854
855 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -0400856 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500857
858 return 1;
859}
860EXPORT_SYMBOL_GPL(__trace_bputs);
861
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500862#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * trace_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	/* Taking a snapshot from NMI context is not supported. */
	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored ***\n");
		return;
	}

	/* No spare buffer to swap with: stop tracing instead (see above). */
	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here! ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	/* Swap the live buffer with the max (snapshot) buffer, IRQs off. */
	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500908
/* Forward declarations; the definitions live later in this file. */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
912
913static int alloc_snapshot(struct trace_array *tr)
914{
915 int ret;
916
917 if (!tr->allocated_snapshot) {
918
919 /* allocate spare buffer */
920 ret = resize_buffer_duplicate_size(&tr->max_buffer,
921 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
922 if (ret < 0)
923 return ret;
924
925 tr->allocated_snapshot = true;
926 }
927
928 return 0;
929}
930
static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer itself; instead we shrink it to
	 * the minimum size, because the max_tr ring buffer has some state
	 * (e.g. ring->clock) that we want to preserve.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500943
944/**
Tom Zanussi93e31ff2013-10-24 08:59:26 -0500945 * tracing_alloc_snapshot - allocate snapshot buffer.
946 *
947 * This only allocates the snapshot buffer if it isn't already
948 * allocated - it doesn't also take a snapshot.
949 *
950 * This is meant to be used in cases where the snapshot buffer needs
951 * to be set up for events that can't sleep but need to be able to
952 * trigger a snapshot.
953 */
954int tracing_alloc_snapshot(void)
955{
956 struct trace_array *tr = &global_trace;
957 int ret;
958
959 ret = alloc_snapshot(tr);
960 WARN_ON(ret < 0);
961
962 return ret;
963}
964EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
965
/**
 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to trace_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	/* Make sure the spare buffer exists before snapshotting. */
	if (tracing_alloc_snapshot() < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500988#else
/* Stubs used when CONFIG_TRACER_SNAPSHOT is not set: warn the caller once. */
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning (tracing_snapshot() WARNs once when compiled out) */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001006#endif /* CONFIG_TRACER_SNAPSHOT */
1007
Steven Rostedt (Red Hat)5280bce2013-07-02 19:59:57 -04001008static void tracer_tracing_off(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001009{
1010 if (tr->trace_buffer.buffer)
1011 ring_buffer_record_off(tr->trace_buffer.buffer);
1012 /*
1013 * This flag is looked at when buffers haven't been allocated
1014 * yet, or by some tracers (like irqsoff), that just want to
1015 * know if the ring buffer has been disabled, but it can handle
1016 * races of where it gets disabled but we still do a record.
1017 * As the check is in the fast path of the tracers, it is more
1018 * important to be fast than accurate.
1019 */
1020 tr->buffer_disabled = 1;
1021 /* Make the flag seen by readers */
1022 smp_wmb();
1023}
1024
/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	/* Operates on the global (top-level) trace instance only. */
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
1038
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001039void disable_trace_on_warning(void)
1040{
1041 if (__disable_trace_on_warning)
1042 tracing_off();
1043}
1044
Steven Rostedt499e5472012-02-22 15:50:28 -05001045/**
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001046 * tracer_tracing_is_on - show real state of ring buffer enabled
1047 * @tr : the trace array to know if ring buffer is enabled
1048 *
1049 * Shows real state of the ring buffer if it is enabled or not.
1050 */
Steven Rostedt (Red Hat)e7c15cd2016-06-23 12:45:36 -04001051int tracer_tracing_is_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001052{
1053 if (tr->trace_buffer.buffer)
1054 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1055 return !tr->buffer_disabled;
1056}
1057
/**
 * tracing_is_on - show state of ring buffers enabled
 *
 * Returns non-zero when the global trace ring buffers are recording.
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
1066
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001067static int __init set_buf_size(char *str)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001068{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001069 unsigned long buf_size;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001070
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001071 if (!str)
1072 return 0;
Li Zefan9d612be2009-06-24 17:33:15 +08001073 buf_size = memparse(str, &str);
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001074 /* nr_entries can not be zero */
Li Zefan9d612be2009-06-24 17:33:15 +08001075 if (buf_size == 0)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001076 return 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001077 trace_buf_size = buf_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001078 return 1;
1079}
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001080__setup("trace_buf_size=", set_buf_size);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001081
Tim Bird0e950172010-02-25 15:36:43 -08001082static int __init set_tracing_thresh(char *str)
1083{
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001084 unsigned long threshold;
Tim Bird0e950172010-02-25 15:36:43 -08001085 int ret;
1086
1087 if (!str)
1088 return 0;
Daniel Walterbcd83ea2012-09-26 22:08:38 +02001089 ret = kstrtoul(str, 0, &threshold);
Tim Bird0e950172010-02-25 15:36:43 -08001090 if (ret < 0)
1091 return 0;
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001092 tracing_thresh = threshold * 1000;
Tim Bird0e950172010-02-25 15:36:43 -08001093 return 1;
1094}
1095__setup("tracing_thresh=", set_tracing_thresh);
1096
/* Convert a nanosecond count to (truncated) microseconds. */
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}
1101
/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};
1116
/* Table of selectable trace clocks: function, user-visible name, ns flag. */
static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};
1132
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001133/*
1134 * trace_parser_get_init - gets the buffer for trace parser
1135 */
1136int trace_parser_get_init(struct trace_parser *parser, int size)
1137{
1138 memset(parser, 0, sizeof(*parser));
1139
1140 parser->buffer = kmalloc(size, GFP_KERNEL);
1141 if (!parser->buffer)
1142 return 1;
1143
1144 parser->size = size;
1145 return 0;
1146}
1147
1148/*
1149 * trace_parser_put - frees the buffer for trace parser
1150 */
1151void trace_parser_put(struct trace_parser *parser)
1152{
1153 kfree(parser->buffer);
1154}
1155
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	/* A fresh read (offset zero) starts with an empty parser. */
	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		/* Start a new token at the beginning of the buffer. */
		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		/* Keep one byte spare for the NUL terminator. */
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		/* Token complete: terminate it and clear the continued state. */
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		/* Ran out of input mid-token: continue on the next call. */
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
1241
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001242/* TODO add a seq_buf_to_buffer() */
Dmitri Vorobievb8b94262009-03-22 19:11:11 +02001243static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001244{
1245 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001246
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001247 if (trace_seq_used(s) <= s->seq.readpos)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001248 return -EBUSY;
1249
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001250 len = trace_seq_used(s) - s->seq.readpos;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001251 if (cnt > len)
1252 cnt = len;
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001253 memcpy(buf, s->buffer + s->seq.readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001254
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001255 s->seq.readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001256 return cnt;
1257}
1258
Tim Bird0e950172010-02-25 15:36:43 -08001259unsigned long __read_mostly tracing_thresh;
1260
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001261#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	/* Record which CPU hit the max and when the trace started. */
	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	/* Save details of the task that caused the new max latency. */
	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this tasks comm */
	tracing_record_cmdline(tsk);
}
1300
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 *
 * Callers must have interrupts disabled (WARNed on below).
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	/* Nothing to record while tracing is stopped. */
	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	/* Serialize buffer swaps with tr->max_lock. */
	arch_spin_lock(&tr->max_lock);

	/* Swap the live buffer with the spare (max) buffer. */
	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
1335
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 *
 * Callers must have interrupts disabled (WARNed on below).
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	/* Serialize buffer swaps with tr->max_lock. */
	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	/* -EAGAIN and -EBUSY are expected transient failures. */
	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001379#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001380
/*
 * Block until data is available in the ring buffer for iter->cpu_file.
 * @full is forwarded to ring_buffer_wait() -- presumably "wait until
 * more than a page is ready", TODO confirm against ring_buffer_wait().
 */
static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}
1390
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001391#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * Run @type's boot-time selftest against the global trace array.
 * Returns 0 when the test passes (or there is nothing to run),
 * -1 when it fails -- in which case the tracer is not registered.
 */
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001450#else
/* Selftests compiled out: always report success. */
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
1455#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001456
/* Forward declarations used by register_tracer() below. */
static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);
1460
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 *
 * Returns 0 on success, -1 on invalid name / duplicate / failed
 * selftest, -ENOMEM when the dummy flags allocation fails.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	/* Reject a second registration under the same name. */
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	/* Fill in defaults for the optional flag callbacks/tables. */
	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags) {
		/*allocate a dummy tracer_flags*/
		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
		if (!type->flags) {
			ret = -ENOMEM;
			goto out;
		}
		type->flags->val = 0;
		type->flags->opts = dummy_tracer_opt;
	} else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	/* store the tracer for __set_tracer_option */
	type->flags->trace = type;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	/* Link the tracer at the head of the list. */
	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	/* The rest only applies when this tracer was requested at boot. */
	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
1549
/*
 * tracing_reset - clear the trace data of one CPU's ring buffer
 * @buf: the trace_buffer whose per-CPU buffer to reset
 * @cpu: the CPU whose buffer is cleared
 *
 * Recording is disabled around the reset so no writer can race with
 * the clear; synchronize_sched() waits out writers already inside a
 * preempt-disabled commit before the buffer is wiped.
 */
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	/* Buffer may not be allocated yet (early boot / instance setup) */
	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
1565
/*
 * tracing_reset_online_cpus - clear the buffers of every online CPU
 * @buf: the trace_buffer to reset
 *
 * Same writer-quiescing scheme as tracing_reset(), but iterates all
 * online CPUs and also restarts the buffer's time_start stamp.
 */
void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	/* Reset the reference timestamp used for relative output */
	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
1586
/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	/* Walk every trace instance (trace_types_lock protects the list) */
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		/* The snapshot/max buffer is cleared as well when present */
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}
1599
/* Default number of saved comm entries; resizable at runtime */
#define SAVED_CMDLINES_DEFAULT 128
/* Sentinel meaning "no comm recorded for this slot/pid" */
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/*
 * Storage mapping pids to the task comm (and tgid) last seen for them,
 * so trace output can print names without holding task references.
 * Protected by trace_cmdline_lock.
 */
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;	/* slot -> owning pid */
	unsigned *map_cmdline_to_tgid;	/* slot -> owning tgid */
	unsigned cmdline_num;		/* number of slots */
	int cmdline_idx;		/* next slot to recycle */
	char *saved_cmdlines;		/* cmdline_num * TASK_COMM_LEN bytes */
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001615
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001616static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001617{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001618 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1619}
1620
/* Store @cmdline (TASK_COMM_LEN bytes, copied verbatim) into slot @idx. */
static inline void set_cmdline(int idx, const char *cmdline)
{
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}
1625
1626static int allocate_cmdlines_buffer(unsigned int val,
1627 struct saved_cmdlines_buffer *s)
1628{
1629 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1630 GFP_KERNEL);
1631 if (!s->map_cmdline_to_pid)
1632 return -ENOMEM;
1633
1634 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1635 if (!s->saved_cmdlines) {
1636 kfree(s->map_cmdline_to_pid);
1637 return -ENOMEM;
1638 }
1639
Adrian Salido50b2d862017-04-18 11:44:33 -07001640 s->map_cmdline_to_tgid = kmalloc_array(val,
1641 sizeof(*s->map_cmdline_to_tgid),
1642 GFP_KERNEL);
1643 if (!s->map_cmdline_to_tgid) {
1644 kfree(s->map_cmdline_to_pid);
1645 kfree(s->saved_cmdlines);
1646 return -ENOMEM;
1647 }
1648
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001649 s->cmdline_idx = 0;
1650 s->cmdline_num = val;
1651 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1652 sizeof(s->map_pid_to_cmdline));
1653 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1654 val * sizeof(*s->map_cmdline_to_pid));
Adrian Salido50b2d862017-04-18 11:44:33 -07001655 memset(s->map_cmdline_to_tgid, NO_CMDLINE_MAP,
1656 val * sizeof(*s->map_cmdline_to_tgid));
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001657
1658 return 0;
1659}
1660
1661static int trace_create_savedcmd(void)
1662{
1663 int ret;
1664
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001665 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001666 if (!savedcmd)
1667 return -ENOMEM;
1668
1669 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1670 if (ret < 0) {
1671 kfree(savedcmd);
1672 savedcmd = NULL;
1673 return -ENOMEM;
1674 }
1675
1676 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001677}
1678
Carsten Emdeb5130b12009-09-13 01:43:07 +02001679int is_tracing_stopped(void)
1680{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001681 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001682}
1683
/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	/* stop/start nest: only re-enable when the count drops to zero */
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Re-enable the snapshot/max buffer too */
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
1726
/*
 * tracing_start_tr - per-instance counterpart of tracing_start()
 * @tr: the trace array to restart
 *
 * The global array is delegated to tracing_start() so the max buffer
 * is handled as well; instances only own their main buffer.
 */
static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	/* Nested stop/start: only act when the count returns to zero */
	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
1757
/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	/* Already stopped: just deepen the nesting count */
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Also quiesce the snapshot/max buffer */
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
1791
/*
 * tracing_stop_tr - per-instance counterpart of tracing_stop()
 * @tr: the trace array to stop
 *
 * Delegates the global array to tracing_stop() (which also handles the
 * max buffer); otherwise disables only this instance's buffer, with
 * nesting tracked in tr->stop_count.
 */
static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
1812
Ingo Molnare309b412008-05-12 21:20:51 +02001813void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001814
/*
 * trace_save_cmdline - record @tsk's comm in the savedcmd map
 *
 * Returns 1 if the comm was saved, 0 if it could not be (pid out of
 * range, or the cmdline lock was contended). Called from the tracing
 * hot path, hence the trylock: losing an update is preferable to
 * spinning. Preemption is disabled across the arch_spin_trylock()
 * because arch spinlocks do not disable preemption themselves.
 */
static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	preempt_disable();
	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock)) {
		preempt_enable();
		return 0;
	}

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		/* No slot yet for this pid: recycle the next slot round-robin */
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);
	savedcmd->map_cmdline_to_tgid[idx] = tsk->tgid;
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();

	return 1;
}
1861
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001862static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001863{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001864 unsigned map;
1865
Steven Rostedt4ca53082009-03-16 19:20:15 -04001866 if (!pid) {
1867 strcpy(comm, "<idle>");
1868 return;
1869 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001870
Steven Rostedt74bf4072010-01-25 15:11:53 -05001871 if (WARN_ON_ONCE(pid < 0)) {
1872 strcpy(comm, "<XXX>");
1873 return;
1874 }
1875
Steven Rostedt4ca53082009-03-16 19:20:15 -04001876 if (pid > PID_MAX_DEFAULT) {
1877 strcpy(comm, "<...>");
1878 return;
1879 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001880
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001881 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001882 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001883 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001884 else
1885 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001886}
1887
/*
 * trace_find_cmdline - locked wrapper around __trace_find_cmdline()
 * @pid:  pid to look up
 * @comm: destination buffer (at least TASK_COMM_LEN bytes)
 */
void trace_find_cmdline(int pid, char comm[])
{
	/* arch spinlocks don't disable preemption; do it explicitly */
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
1898
Adrian Salido50b2d862017-04-18 11:44:33 -07001899static int __find_tgid_locked(int pid)
Jamie Gennis13b625d2012-11-21 15:04:25 -08001900{
1901 unsigned map;
1902 int tgid;
1903
Dmitry Shmidtcb575f72015-10-28 10:45:04 -07001904 map = savedcmd->map_pid_to_cmdline[pid];
Jamie Gennis13b625d2012-11-21 15:04:25 -08001905 if (map != NO_CMDLINE_MAP)
Adrian Salido50b2d862017-04-18 11:44:33 -07001906 tgid = savedcmd->map_cmdline_to_tgid[map];
Jamie Gennis13b625d2012-11-21 15:04:25 -08001907 else
1908 tgid = -1;
1909
Adrian Salido50b2d862017-04-18 11:44:33 -07001910 return tgid;
1911}
1912
/*
 * trace_find_tgid - locked lookup of the tgid recorded for @pid
 *
 * Returns the saved tgid, or -1 when @pid has no savedcmd entry.
 */
int trace_find_tgid(int pid)
{
	int tgid;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	tgid = __find_tgid_locked(pid);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();

	return tgid;
}
1927
/*
 * tracing_record_cmdline - save @tsk's comm if recording is active
 *
 * The per-cpu trace_cmdline_save flag rate-limits saves: it is set by
 * the commit path and cleared here once a save succeeds, so each CPU
 * records at most one comm per committed event.
 */
void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	if (trace_save_cmdline(tsk))
		__this_cpu_write(trace_cmdline_save, false);
}
1939
/*
 * tracing_generic_entry_update - fill the common fields of a trace entry
 * @entry: entry to initialize
 * @flags: caller's saved irq flags
 * @pc:    preempt count at the time of the event
 *
 * Records pid, preempt depth and the irq/nmi/softirq/need-resched
 * context flags derived from @pc and @flags.
 */
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001961
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04001962static __always_inline void
1963trace_event_setup(struct ring_buffer_event *event,
1964 int type, unsigned long flags, int pc)
1965{
1966 struct trace_entry *ent = ring_buffer_event_data(event);
1967
1968 tracing_generic_entry_update(ent, flags, pc);
1969 ent->type = type;
1970}
1971
/*
 * Reserve @len bytes on @buffer and pre-fill the common entry header.
 * Returns the reserved event, or NULL if the buffer rejected the
 * reservation (e.g. recording disabled or buffer full in overwrite-off
 * mode).
 */
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event = ring_buffer_lock_reserve(buffer, len);

	if (event)
		trace_event_setup(event, type, flags, pc);

	return event;
}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001986
/* Per-cpu scratch event page used when events are likely to be filtered */
DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
/* Per-cpu nesting count: non-zero means the scratch event is in use */
DEFINE_PER_CPU(int, trace_buffered_event_cnt);
/* Number of users of the buffered-event mechanism (under event_mutex) */
static int trace_buffered_event_ref;
1990
/**
 * trace_buffered_event_enable - enable buffering events
 *
 * When events are being filtered, it is quicker to use a temporary
 * buffer to write the event data into if there's a likely chance
 * that it will not be committed. The discard of the ring buffer
 * is not as fast as committing, and is much slower than copying
 * a commit.
 *
 * When an event is to be filtered, allocate per cpu buffers to
 * write the event data into, and if the event is filtered and discarded
 * it is simply dropped, otherwise, the entire data is to be committed
 * in one shot.
 */
void trace_buffered_event_enable(void)
{
	struct ring_buffer_event *event;
	struct page *page;
	int cpu;

	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));

	/* Refcounted: only the first user allocates */
	if (trace_buffered_event_ref++)
		return;

	for_each_tracing_cpu(cpu) {
		/* One page per CPU, allocated on that CPU's node */
		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_NORETRY, 0);
		if (!page)
			goto failed;

		event = page_address(page);
		memset(event, 0, sizeof(*event));

		per_cpu(trace_buffered_event, cpu) = event;

		/* Sanity check: this_cpu access must see the per_cpu store */
		preempt_disable();
		if (cpu == smp_processor_id() &&
		    this_cpu_read(trace_buffered_event) !=
		    per_cpu(trace_buffered_event, cpu))
			WARN_ON_ONCE(1);
		preempt_enable();
	}

	return;
 failed:
	/* Partial allocation: disable rolls back what was set up */
	trace_buffered_event_disable();
}
2039
/* IPI callback: re-arm the per-cpu buffered event by undoing the inc */
static void enable_trace_buffered_event(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
	this_cpu_dec(trace_buffered_event_cnt);
}
2046
/* IPI callback: mark the per-cpu buffered event as busy so it isn't used */
static void disable_trace_buffered_event(void *data)
{
	this_cpu_inc(trace_buffered_event_cnt);
}
2051
/**
 * trace_buffered_event_disable - disable buffering events
 *
 * When a filter is removed, it is faster to not use the buffered
 * events, and to commit directly into the ring buffer. Free up
 * the temp buffers when there are no more users. This requires
 * special synchronization with current events.
 */
void trace_buffered_event_disable(void)
{
	int cpu;

	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));

	if (WARN_ON_ONCE(!trace_buffered_event_ref))
		return;

	/* Refcounted: only the last user tears down */
	if (--trace_buffered_event_ref)
		return;

	preempt_disable();
	/* For each CPU, set the buffer as used. */
	smp_call_function_many(tracing_buffer_mask,
			       disable_trace_buffered_event, NULL, 1);
	preempt_enable();

	/* Wait for all current users to finish */
	synchronize_sched();

	for_each_tracing_cpu(cpu) {
		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
		per_cpu(trace_buffered_event, cpu) = NULL;
	}
	/*
	 * Make sure trace_buffered_event is NULL before clearing
	 * trace_buffered_event_cnt.
	 */
	smp_wmb();

	preempt_disable();
	/* Do the work on each cpu */
	smp_call_function_many(tracing_buffer_mask,
			       enable_trace_buffered_event, NULL, 1);
	preempt_enable();
}
2097
/*
 * __buffer_unlock_commit - commit a reserved event to @buffer
 *
 * Also marks this CPU as eligible to save a cmdline. If @event is the
 * per-cpu temp (buffered) event it is copied into the ring buffer with
 * ring_buffer_write() and then released; otherwise the in-place
 * reservation is committed directly.
 */
void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}
2112
/* Throwaway ring buffer used so triggers can inspect discarded events */
static struct ring_buffer *temp_buffer;
2114
/*
 * trace_event_buffer_lock_reserve - reserve space for a trace event
 * @current_rb:  out-param set to the ring buffer actually reserved from
 * @trace_file:  event file the event belongs to (selects the instance)
 * @type:        event type id
 * @len:         payload length
 * @flags, @pc:  context for the common entry header
 *
 * Fast path: if the event is soft-disabled or filtered, try the per-cpu
 * temp buffer first (cheap to drop if the filter discards it). The
 * per-cpu count acts as a nesting guard — only the outermost (val == 1)
 * user may claim the temp event. Falls back to a real reservation,
 * and finally to temp_buffer so triggers can still see the data when
 * the real buffer refuses the reservation (e.g. tracing is off).
 */
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
			  struct trace_event_file *trace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;
	int val;

	*current_rb = trace_file->tr->trace_buffer.buffer;

	if ((trace_file->flags &
	     (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
	    (entry = this_cpu_read(trace_buffered_event))) {
		/* Try to use the per cpu buffer first */
		val = this_cpu_inc_return(trace_buffered_event_cnt);
		if (val == 1) {
			trace_event_setup(entry, type, flags, pc);
			entry->array[0] = len;
			return entry;
		}
		this_cpu_dec(trace_buffered_event_cnt);
	}

	entry = trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursive
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = trace_buffer_lock_reserve(*current_rb,
						  type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2155
/*
 * trace_buffer_unlock_commit_regs - commit an event, then record
 * kernel and user stack traces if the respective options are enabled.
 */
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	/*
	 * If regs is not set, then skip the following callers:
	 *   trace_buffer_unlock_commit_regs
	 *   event_trigger_unlock_commit
	 *   trace_event_buffer_commit
	 *   trace_event_raw_event_sched_switch
	 * Note, we can still get here via blktrace, wakeup tracer
	 * and mmiotrace, but that's ok if they lose a function or
	 * two. They are not that meaningful.
	 */
	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002177
/*
 * trace_function - record a function-entry event (ip + parent ip)
 * @tr:        trace array to write into
 * @ip:        address of the traced function
 * @parent_ip: address of its caller
 * @flags, @pc: context for the common entry header
 *
 * Silently drops the event if the reservation fails or the event
 * filter discards it.
 */
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct trace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->parent_ip		= parent_ip;

	/* Commit only if the event filter accepts the entry */
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}
2199
#ifdef CONFIG_STACKTRACE

/* Max stack entries that fit in one page of per-cpu scratch space */
#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
/* Per-cpu scratch area for capturing deep stack traces */
struct ftrace_stack {
	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
/* Nesting count guarding use of the per-cpu ftrace_stack */
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2209
/*
 * __ftrace_trace_stack - record a kernel stack trace event into @buffer
 * @buffer: ring buffer to write into
 * @flags:  saved irq flags for the entry header
 * @skip:   number of stack frames to skip
 * @pc:     preempt count context
 * @regs:   if set, take the trace from these registers instead of here
 *
 * Uses a per-cpu scratch area to capture traces deeper than the
 * inline FTRACE_STACK_ENTRIES; a nesting counter arbitrates the
 * scratch area between task/irq/NMI contexts (nested users fall back
 * to the smaller inline capture).
 */
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct trace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries	= 0;
	trace.skip		= skip;

	/*
	 * Add two, for this function and the call to save_stack_trace()
	 * If regs is set, then these functions will not be in the way.
	 */
	if (!regs)
		trace.skip += 2;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		/* Outermost user: capture into the large per-cpu area */
		trace.entries		= this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		/* Copy the pre-captured trace out of the scratch area */
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		/* Nested: capture directly into the (smaller) event payload */
		trace.max_entries	= FTRACE_STACK_ENTRIES;
		trace.entries		= entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();

}
2297
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002298static inline void ftrace_trace_stack(struct trace_array *tr,
2299 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002300 unsigned long flags,
2301 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05002302{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002303 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05002304 return;
2305
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002306 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05002307}
2308
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002309void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2310 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04002311{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002312 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04002313}
2314
Steven Rostedt03889382009-12-11 09:48:22 -05002315/**
2316 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002317 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05002318 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002319void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05002320{
2321 unsigned long flags;
2322
2323 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05002324 return;
Steven Rostedt03889382009-12-11 09:48:22 -05002325
2326 local_save_flags(flags);
2327
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002328 /*
2329 * Skip 3 more, seems to get us at the caller of
2330 * this function.
2331 */
2332 skip += 3;
2333 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2334 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05002335}
2336
/* Per-CPU recursion guard for ftrace_trace_userstack() */
static DEFINE_PER_CPU(int, user_stack_count);
2338
/*
 * Record the current task's *user-space* stack trace into @buffer,
 * gated by the "userstacktrace" trace option on the global trace
 * instance.  Not callable from NMI context (user stack walking can
 * fault), and protected against recursion via a per-CPU counter,
 * since saving the user stack may itself trigger traced events.
 */
void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	/* Zero the fixed-size slot so short traces don't leak stale data */
	memset(&entry->caller, 0, sizeof(entry->caller));

	/* Capture directly into the reserved ring buffer event */
	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = 0;
	trace.entries = entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}
2390
#ifdef UNUSED
/*
 * Dead code: UNUSED is never defined, so this is compiled out.
 * NOTE(review): if ever re-enabled, this call no longer matches
 * ftrace_trace_userstack()'s current (buffer, flags, pc) signature —
 * it still passes a trace_array pointer.
 */
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02002397
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002398#endif /* CONFIG_STACKTRACE */
2399
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	int nesting;			/* how many of the buffers below are in use (0..4) */
	char buffer[4][TRACE_BUF_SIZE];	/* one scratch buffer per allowed nesting level */
};

/* Per-CPU trace_printk() scratch space; NULL until trace_printk_init_buffers() */
static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002407
/*
 * This allows for lockless recording.  If we're nested too deeply, then
 * this returns NULL.
 *
 * Each context level (normal, softirq, irq, NMI) gets its own slot in
 * the per-CPU buffer array, selected by the nesting counter.  Must be
 * paired with put_trace_buf() on success.
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);

	/* Not allocated yet, or all four nesting slots already taken */
	if (!buffer || buffer->nesting >= 4)
		return NULL;

	return &buffer->buffer[buffer->nesting++][0];
}
2421
/* Release the slot handed out by get_trace_buf() (drops one nesting level). */
static void put_trace_buf(void)
{
	this_cpu_dec(trace_percpu_buffer->nesting);
}
2426
2427static int alloc_percpu_trace_buffer(void)
2428{
2429 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002430
2431 buffers = alloc_percpu(struct trace_buffer_struct);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002432 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2433 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002434
2435 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002436 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002437}
2438
/* Nonzero once the trace_printk() percpu buffers have been set up */
static int buffers_allocated;
2440
/*
 * Set up the per-CPU buffers used by trace_printk().  Idempotent: does
 * nothing after the first successful call.  Also expands the ring
 * buffers to their configured size and, when called late enough that
 * the global buffer already exists (i.e. from module code), starts
 * cmdline recording immediately.
 */
void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warn("\n");
	pr_warn("**********************************************************\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warn("** unsafe for production use.                           **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** If you see this message and you are not debugging    **\n");
	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
}
2480
2481void trace_printk_start_comm(void)
2482{
2483 /* Start tracing comms if trace printk is set */
2484 if (!buffers_allocated)
2485 return;
2486 tracing_start_cmdline_record();
2487}
2488
2489static void trace_printk_start_stop_comm(int enabled)
2490{
2491 if (!buffers_allocated)
2492 return;
2493
2494 if (enabled)
2495 tracing_start_cmdline_record();
2496 else
2497 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002498}
2499
/**
 * trace_vbprintk - write binary msg to tracing buffer
 * @ip:   instruction pointer of the trace_printk() call site
 * @fmt:  printf format string (pointer is stored, not the text)
 * @args: arguments to binary-encode via vbin_printf()
 *
 * Fast path for trace_printk(): only the format pointer and the
 * binary-packed arguments are stored; formatting happens at read
 * time.  Returns the number of u32 words encoded, or 0 when tracing
 * is disabled or no scratch buffer is available.
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		/* no buffer reserved, so skip put_trace_buf() */
		goto out_nobuffer;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	/* Oversized or failed encoding: drop the message */
	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	}

out:
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
2562
/*
 * Format a printk()-style message into a per-CPU scratch buffer and
 * copy the resulting string (NUL included) into a TRACE_PRINT event
 * in @buffer.  Returns the formatted length, or 0 when tracing is
 * off or no scratch buffer is free.
 */
static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();


	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		/* nothing reserved yet, so skip put_trace_buf() */
		goto out_nobuffer;
	}

	/* vscnprintf() bounds len to TRACE_BUF_SIZE - 1 */
	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len + 1);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		/* vendor hook: mirror the message to the STM trace port */
		stm_log(OST_ENTITY_TRACE_PRINTK, entry->buf, len + 1);
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
	}

out:
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
Steven Rostedt659372d2009-09-03 19:11:07 -04002617
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002618int trace_array_vprintk(struct trace_array *tr,
2619 unsigned long ip, const char *fmt, va_list args)
2620{
2621 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2622}
2623
/*
 * printf()-style front end for writing into @tr's trace buffer.
 * Returns the number of bytes written, or 0 when printk tracing is off.
 *
 * NOTE(review): the gate below tests the *global* trace instance's
 * TRACE_ITER_PRINTK flag even though the write targets @tr — verify
 * this is intended for instances that configure the flag independently.
 */
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}
2638
2639int trace_array_printk_buf(struct ring_buffer *buffer,
2640 unsigned long ip, const char *fmt, ...)
2641{
2642 int ret;
2643 va_list ap;
2644
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002645 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002646 return 0;
2647
2648 va_start(ap, fmt);
2649 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2650 va_end(ap);
2651 return ret;
2652}
2653
/* trace_printk() backend: log a formatted message to the global buffer. */
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
2659
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002660static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002661{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002662 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2663
Steven Rostedt5a90f572008-09-03 17:42:51 -04002664 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002665 if (buf_iter)
2666 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002667}
2668
Ingo Molnare309b412008-05-12 21:20:51 +02002669static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002670peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2671 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002672{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002673 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002674 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002675
Steven Rostedtd7690412008-10-01 00:29:53 -04002676 if (buf_iter)
2677 event = ring_buffer_iter_peek(buf_iter, ts);
2678 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002679 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002680 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002681
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002682 if (event) {
2683 iter->ent_size = ring_buffer_event_length(event);
2684 return ring_buffer_event_data(event);
2685 }
2686 iter->ent_size = 0;
2687 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002688}
Steven Rostedtd7690412008-10-01 00:29:53 -04002689
/*
 * Find the chronologically next entry across the buffers the iterator
 * covers.  For a per-CPU trace file, peek that CPU directly; otherwise
 * scan every CPU and pick the entry with the smallest timestamp.
 * Output parameters (@ent_cpu, @missing_events, @ent_ts) are optional.
 * Returns NULL when all covered buffers are empty.
 */
static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother by iterating over
	 * all cpu and peek directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			/* peek_next_entry() set iter->ent_size for this CPU */
			next_size = iter->ent_size;
		}
	}

	/* Restore the size of the winning entry (scan may have clobbered it) */
	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}
2749
/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002756
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002757/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002758void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002759{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002760 iter->ent = __find_next_entry(iter, &iter->cpu,
2761 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002762
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002763 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002764 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002765
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002766 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002767}
2768
/* Consume (remove) the next event on the iterator's current CPU. */
static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}
2774
/*
 * seq_file ->next(): advance to the entry at position *pos.
 * The iterator can only move forward; a request for an earlier
 * position returns NULL.  Returns the iterator as the cursor.
 */
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	/* leftover handling happens in s_start(), never here */
	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	/* catch up until the iterator reaches the requested index */
	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}
2801
/*
 * Reset the per-CPU ring buffer iterator for @cpu and skip any events
 * stamped before the buffer's start time, recording how many were
 * skipped in the per-CPU skipped_entries count.
 */
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}
2831
/*
 * The current tracer is copied to avoid a global locking
 * all around.
 */

/*
 * seq_file ->start(): prepare the iterator for a read at *pos.
 * Refreshes the local tracer copy, rewinds and replays to *pos when
 * the position changed, and takes the event-read / per-buffer access
 * locks (dropped again in s_stop()).
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	/* A snapshot read can't iterate a tracer that reuses the max buffer */
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		/* Position moved: rewind everything and walk forward to *pos */
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}
2896
/*
 * seq_file ->stop(): undo s_start() — drop the access and event-read
 * locks and re-enable cmdline recording.  Mirrors s_start()'s early
 * ERR_PTR return for snapshot reads of max_tr tracers, where no locks
 * were taken.
 */
static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
2912
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002913static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002914get_total_entries(struct trace_buffer *buf,
2915 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002916{
2917 unsigned long count;
2918 int cpu;
2919
2920 *total = 0;
2921 *entries = 0;
2922
2923 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002924 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002925 /*
2926 * If this buffer has skipped entries, then we hold all
2927 * entries for the trace and we need to ignore the
2928 * ones before the time stamp.
2929 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002930 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2931 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002932 /* total is the same as the entries */
2933 *total += count;
2934 } else
2935 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002936 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002937 *entries += count;
2938 }
2939}
2940
/* Emit the column legend used by the latency-format trace output. */
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "# _------=> CPU# \n");
	seq_puts(m, "# / _-----=> irqs-off \n");
	seq_puts(m, "# | / _----=> need-resched \n");
	seq_puts(m, "# || / _---=> hardirq/softirq \n");
	seq_puts(m, "# ||| / _--=> preempt-depth \n");
	seq_puts(m, "# |||| / delay \n");
	seq_puts(m, "# cmd pid ||||| time | caller \n");
	seq_puts(m, "# \\ / ||||| \\ | / \n");
}
2952
/*
 * Print the "entries-in-buffer/entries-written" summary line shared by
 * all of the function-trace header variants below.
 */
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long entries, total;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}
2963
/* Basic (no irq info, no TGID) column legend for function tracing. */
static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
	seq_puts(m, "# | | | | |\n");
}
2970
/*
 * Column legend for function tracing with the TGID column enabled.
 * Same as print_func_help_header() plus the TGID column.
 *
 * Consistency: every sibling header helper in this file emits its legend
 * with a single seq_puts() of concatenated literals; do the same here.
 */
static void print_func_help_header_tgid(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "# TASK-PID TGID CPU# TIMESTAMP FUNCTION\n"
		    "# | | | | | |\n");
}
2977
/* Column legend for function tracing when irq-info columns are enabled. */
static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "# _-----=> irqs-off\n");
	seq_puts(m, "# / _----=> need-resched\n");
	seq_puts(m, "# | / _---=> hardirq/softirq\n");
	seq_puts(m, "# || / _--=> preempt-depth\n");
	seq_puts(m, "# ||| / delay\n");
	seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
	seq_puts(m, "# | | | |||| | |\n");
}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002989
/*
 * Column legend for function tracing with both irq-info and TGID
 * columns enabled.
 *
 * Consistency: collapse the seven seq_puts() calls into one with
 * concatenated literals, matching the convention used by the other
 * header helpers in this file.
 */
static void print_func_help_header_irq_tgid(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "# _-----=> irqs-off\n"
		    "# / _----=> need-resched\n"
		    "# | / _---=> hardirq/softirq\n"
		    "# || / _--=> preempt-depth\n"
		    "# ||| / delay\n"
		    "# TASK-PID TGID CPU# |||| TIMESTAMP FUNCTION\n"
		    "# | | | | |||| | |\n");
}
3001
Jiri Olsa62b915f2010-04-02 19:01:22 +02003002void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003003print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3004{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003005 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003006 struct trace_buffer *buf = iter->trace_buffer;
3007 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003008 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003009 unsigned long entries;
3010 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003011 const char *name = "preemption";
3012
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05003013 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003014
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003015 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003016
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003017 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003018 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003019 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003020 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003021 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003022 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02003023 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003024 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02003025 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003026 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003027#if defined(CONFIG_PREEMPT_NONE)
3028 "server",
3029#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3030 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04003031#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003032 "preempt",
3033#else
3034 "unknown",
3035#endif
3036 /* These are reserved for later use */
3037 0, 0, 0, 0);
3038#ifdef CONFIG_SMP
3039 seq_printf(m, " #P:%d)\n", num_online_cpus());
3040#else
3041 seq_puts(m, ")\n");
3042#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003043 seq_puts(m, "# -----------------\n");
3044 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003045 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07003046 data->comm, data->pid,
3047 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003048 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003049 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003050
3051 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003052 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003053 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3054 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003055 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003056 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3057 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04003058 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003059 }
3060
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003061 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003062}
3063
Steven Rostedta3097202008-11-07 22:36:02 -05003064static void test_cpu_buff_start(struct trace_iterator *iter)
3065{
3066 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003067 struct trace_array *tr = iter->tr;
Steven Rostedta3097202008-11-07 22:36:02 -05003068
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003069 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003070 return;
3071
3072 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3073 return;
3074
Sasha Levin919cd972015-09-04 12:45:56 -04003075 if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05003076 return;
3077
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003078 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003079 return;
3080
Sasha Levin919cd972015-09-04 12:45:56 -04003081 if (iter->started)
3082 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003083
3084 /* Don't print started cpu buffer for the first entry of the trace */
3085 if (iter->idx > 1)
3086 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3087 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05003088}
3089
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003090static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003091{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003092 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02003093 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003094 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003095 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003096 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003097
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003098 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003099
Steven Rostedta3097202008-11-07 22:36:02 -05003100 test_cpu_buff_start(iter);
3101
Steven Rostedtf633cef2008-12-23 23:24:13 -05003102 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003103
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003104 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003105 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3106 trace_print_lat_context(iter);
3107 else
3108 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003109 }
3110
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003111 if (trace_seq_has_overflowed(s))
3112 return TRACE_TYPE_PARTIAL_LINE;
3113
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003114 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003115 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003116
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003117 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003118
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003119 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003120}
3121
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003122static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003123{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003124 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003125 struct trace_seq *s = &iter->seq;
3126 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003127 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003128
3129 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003130
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003131 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003132 trace_seq_printf(s, "%d %d %llu ",
3133 entry->pid, iter->cpu, iter->ts);
3134
3135 if (trace_seq_has_overflowed(s))
3136 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003137
Steven Rostedtf633cef2008-12-23 23:24:13 -05003138 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003139 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003140 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003141
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003142 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003143
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003144 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003145}
3146
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003147static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003148{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003149 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003150 struct trace_seq *s = &iter->seq;
3151 unsigned char newline = '\n';
3152 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003153 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003154
3155 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003156
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003157 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003158 SEQ_PUT_HEX_FIELD(s, entry->pid);
3159 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3160 SEQ_PUT_HEX_FIELD(s, iter->ts);
3161 if (trace_seq_has_overflowed(s))
3162 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003163 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003164
Steven Rostedtf633cef2008-12-23 23:24:13 -05003165 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003166 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04003167 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003168 if (ret != TRACE_TYPE_HANDLED)
3169 return ret;
3170 }
Steven Rostedt7104f302008-10-01 10:52:51 -04003171
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003172 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003173
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003174 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003175}
3176
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003177static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003178{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003179 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003180 struct trace_seq *s = &iter->seq;
3181 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003182 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003183
3184 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003185
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003186 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003187 SEQ_PUT_FIELD(s, entry->pid);
3188 SEQ_PUT_FIELD(s, iter->cpu);
3189 SEQ_PUT_FIELD(s, iter->ts);
3190 if (trace_seq_has_overflowed(s))
3191 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003192 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003193
Steven Rostedtf633cef2008-12-23 23:24:13 -05003194 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04003195 return event ? event->funcs->binary(iter, 0, event) :
3196 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003197}
3198
Jiri Olsa62b915f2010-04-02 19:01:22 +02003199int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003200{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003201 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003202 int cpu;
3203
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003204 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003205 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003206 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003207 buf_iter = trace_buffer_iter(iter, cpu);
3208 if (buf_iter) {
3209 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003210 return 0;
3211 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003212 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003213 return 0;
3214 }
3215 return 1;
3216 }
3217
Steven Rostedtab464282008-05-12 21:21:00 +02003218 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04003219 buf_iter = trace_buffer_iter(iter, cpu);
3220 if (buf_iter) {
3221 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04003222 return 0;
3223 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003224 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04003225 return 0;
3226 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003227 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003228
Frederic Weisbecker797d3712008-09-30 18:13:45 +02003229 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003230}
3231
Lai Jiangshan4f535962009-05-18 19:35:34 +08003232/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05003233enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003234{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003235 struct trace_array *tr = iter->tr;
3236 unsigned long trace_flags = tr->trace_flags;
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003237 enum print_line_t ret;
3238
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003239 if (iter->lost_events) {
3240 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3241 iter->cpu, iter->lost_events);
3242 if (trace_seq_has_overflowed(&iter->seq))
3243 return TRACE_TYPE_PARTIAL_LINE;
3244 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04003245
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003246 if (iter->trace && iter->trace->print_line) {
3247 ret = iter->trace->print_line(iter);
3248 if (ret != TRACE_TYPE_UNHANDLED)
3249 return ret;
3250 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02003251
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05003252 if (iter->ent->type == TRACE_BPUTS &&
3253 trace_flags & TRACE_ITER_PRINTK &&
3254 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3255 return trace_print_bputs_msg_only(iter);
3256
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003257 if (iter->ent->type == TRACE_BPRINT &&
3258 trace_flags & TRACE_ITER_PRINTK &&
3259 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003260 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003261
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003262 if (iter->ent->type == TRACE_PRINT &&
3263 trace_flags & TRACE_ITER_PRINTK &&
3264 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003265 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003266
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003267 if (trace_flags & TRACE_ITER_BIN)
3268 return print_bin_fmt(iter);
3269
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003270 if (trace_flags & TRACE_ITER_HEX)
3271 return print_hex_fmt(iter);
3272
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003273 if (trace_flags & TRACE_ITER_RAW)
3274 return print_raw_fmt(iter);
3275
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003276 return print_trace_fmt(iter);
3277}
3278
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003279void trace_latency_header(struct seq_file *m)
3280{
3281 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003282 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003283
3284 /* print nothing if the buffers are empty */
3285 if (trace_empty(iter))
3286 return;
3287
3288 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3289 print_trace_header(m, iter);
3290
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003291 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003292 print_lat_help_header(m);
3293}
3294
Jiri Olsa62b915f2010-04-02 19:01:22 +02003295void trace_default_header(struct seq_file *m)
3296{
3297 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003298 struct trace_array *tr = iter->tr;
3299 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02003300
Jiri Olsaf56e7f82011-06-03 16:58:49 +02003301 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3302 return;
3303
Jiri Olsa62b915f2010-04-02 19:01:22 +02003304 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3305 /* print nothing if the buffers are empty */
3306 if (trace_empty(iter))
3307 return;
3308 print_trace_header(m, iter);
3309 if (!(trace_flags & TRACE_ITER_VERBOSE))
3310 print_lat_help_header(m);
3311 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05003312 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3313 if (trace_flags & TRACE_ITER_IRQ_INFO)
Jamie Gennis13b625d2012-11-21 15:04:25 -08003314 if (trace_flags & TRACE_ITER_TGID)
3315 print_func_help_header_irq_tgid(iter->trace_buffer, m);
3316 else
3317 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003318 else
Jamie Gennis13b625d2012-11-21 15:04:25 -08003319 if (trace_flags & TRACE_ITER_TGID)
3320 print_func_help_header_tgid(iter->trace_buffer, m);
3321 else
3322 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003323 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02003324 }
3325}
3326
/* Warn readers of the trace file when function tracing has shut down. */
static void test_ftrace_alive(struct seq_file *m)
{
	if (ftrace_is_dead()) {
		seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		seq_puts(m, "# MAY BE MISSING FUNCTION EVENTS\n");
	}
}
3334
#ifdef CONFIG_TRACER_MAX_TRACE
/* Usage text shown in the top-level "snapshot" file while it is empty. */
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "# Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "# (Doesn't have to be '2' works with any number that\n"
		    "# is not a '0' or '1')\n");
}

/*
 * Usage text for a per-cpu "snapshot" file; what is allowed depends on
 * whether the ring buffer supports per-cpu swap.
 */
static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "# Takes a snapshot of the main buffer for this cpu.\n");
#else
	/* Without per-cpu swap, allocation must go through the main file. */
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "# Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "# (Doesn't have to be '2' works with any number that\n"
		    "# is not a '0' or '1')\n");
}

/* Print allocation state plus the appropriate usage text for @iter. */
static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif
3378
/*
 * seq_file ->show() callback for the "trace" file: emit either the
 * header (when there is no current entry), a leftover line from a
 * previous overflowed pass, or the formatted current entry.
 */
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		/* Header pass: banner, liveness warning, column legend. */
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}
3422
Oleg Nesterov649e9c72013-07-23 17:25:54 +02003423/*
3424 * Should be used after trace_array_get(), trace_types_lock
3425 * ensures that i_cdev was already initialized.
3426 */
3427static inline int tracing_get_cpu(struct inode *inode)
3428{
3429 if (inode->i_cdev) /* See trace_create_cpu_file() */
3430 return (long)inode->i_cdev - 1;
3431 return RING_BUFFER_ALL_CPUS;
3432}
3433
/* seq_file operations backing reads of the "trace" file. */
static const struct seq_operations tracer_seq_ops = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};
3440
Ingo Molnare309b412008-05-12 21:20:51 +02003441static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02003442__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003443{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003444 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003445 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02003446 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003447
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003448 if (tracing_disabled)
3449 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02003450
Jiri Olsa50e18b92012-04-25 10:23:39 +02003451 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003452 if (!iter)
3453 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003454
Gil Fruchter72917232015-06-09 10:32:35 +03003455 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
Steven Rostedt6d158a82012-06-27 20:46:14 -04003456 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003457 if (!iter->buffer_iter)
3458 goto release;
3459
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003460 /*
3461 * We make a copy of the current tracer to avoid concurrent
3462 * changes on it while we are reading.
3463 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003464 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003465 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003466 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003467 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003468
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003469 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003470
Li Zefan79f55992009-06-15 14:58:26 +08003471 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003472 goto fail;
3473
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003474 iter->tr = tr;
3475
3476#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003477 /* Currently only the top directory has a snapshot */
3478 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003479 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003480 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003481#endif
3482 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003483 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003484 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02003485 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003486 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003487
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003488 /* Notify the tracer early; before we stop tracing. */
3489 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01003490 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003491
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003492 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003493 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003494 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3495
David Sharp8be07092012-11-13 12:18:22 -08003496 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09003497 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08003498 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3499
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003500 /* stop the trace while dumping if we are not opening "snapshot" */
3501 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003502 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003503
Steven Rostedtae3b5092013-01-23 15:22:59 -05003504 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003505 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003506 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003507 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003508 }
3509 ring_buffer_read_prepare_sync();
3510 for_each_tracing_cpu(cpu) {
3511 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003512 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003513 }
3514 } else {
3515 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003516 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003517 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003518 ring_buffer_read_prepare_sync();
3519 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003520 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003521 }
3522
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003523 mutex_unlock(&trace_types_lock);
3524
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003525 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003526
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003527 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003528 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003529 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003530 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003531release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02003532 seq_release_private(inode, file);
3533 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003534}
3535
3536int tracing_open_generic(struct inode *inode, struct file *filp)
3537{
Steven Rostedt60a11772008-05-12 21:20:44 +02003538 if (tracing_disabled)
3539 return -ENODEV;
3540
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003541 filp->private_data = inode->i_private;
3542 return 0;
3543}
3544
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003545bool tracing_is_disabled(void)
3546{
3547 return (tracing_disabled) ? true: false;
3548}
3549
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003550/*
3551 * Open and update trace_array ref count.
3552 * Must have the current trace_array passed to it.
3553 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003554static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003555{
3556 struct trace_array *tr = inode->i_private;
3557
3558 if (tracing_disabled)
3559 return -ENODEV;
3560
3561 if (trace_array_get(tr) < 0)
3562 return -ENODEV;
3563
3564 filp->private_data = inode->i_private;
3565
3566 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003567}
3568
/*
 * Release for the "trace" file opened by tracing_open().
 *
 * Write-only opens never created a seq_file/iterator, so they only need
 * to drop the trace_array reference taken at open time.  Readers must
 * tear down every per-cpu ring buffer iterator, give the tracer its
 * close callback, restart tracing if __tracing_open() stopped it (i.e.
 * this was not a snapshot open), and free everything the open path
 * allocated.  Always returns 0.
 */
static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	/* Let the tracer clean up any per-iterator state it set up in ->open() */
	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	/* Drop the reference taken by the open path; lock is already held */
	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	/* Free everything allocated by __tracing_open() */
	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}
3609
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003610static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3611{
3612 struct trace_array *tr = inode->i_private;
3613
3614 trace_array_put(tr);
3615 return 0;
3616}
3617
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003618static int tracing_single_release_tr(struct inode *inode, struct file *file)
3619{
3620 struct trace_array *tr = inode->i_private;
3621
3622 trace_array_put(tr);
3623
3624 return single_release(inode, file);
3625}
3626
/*
 * Open handler for the "trace" file.
 *
 * Takes a reference on the trace_array for the lifetime of the open
 * file (dropped in tracing_release(), or here on failure).  Opening
 * with O_TRUNC for write erases the buffer contents — either one CPU's
 * buffer or all of them, depending on which file this inode represents.
 * Read opens build the full trace iterator via __tracing_open().
 */
static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	/* On any failure, give back the reference taken above */
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
3659
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003660/*
3661 * Some tracers are not suitable for instance buffers.
3662 * A tracer is always available for the global array (toplevel)
3663 * or if it explicitly states that it is.
3664 */
3665static bool
3666trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3667{
3668 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3669}
3670
3671/* Find the next tracer that this trace array may use */
3672static struct tracer *
3673get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3674{
3675 while (t && !trace_ok_for_array(t, tr))
3676 t = t->next;
3677
3678 return t;
3679}
3680
Ingo Molnare309b412008-05-12 21:20:51 +02003681static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003682t_next(struct seq_file *m, void *v, loff_t *pos)
3683{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003684 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003685 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003686
3687 (*pos)++;
3688
3689 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003690 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003691
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003692 return t;
3693}
3694
3695static void *t_start(struct seq_file *m, loff_t *pos)
3696{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003697 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003698 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003699 loff_t l = 0;
3700
3701 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003702
3703 t = get_tracer_for_array(tr, trace_types);
3704 for (; t && l < *pos; t = t_next(m, t, &l))
3705 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003706
3707 return t;
3708}
3709
3710static void t_stop(struct seq_file *m, void *p)
3711{
3712 mutex_unlock(&trace_types_lock);
3713}
3714
3715static int t_show(struct seq_file *m, void *v)
3716{
3717 struct tracer *t = v;
3718
3719 if (!t)
3720 return 0;
3721
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003722 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003723 if (t->next)
3724 seq_putc(m, ' ');
3725 else
3726 seq_putc(m, '\n');
3727
3728 return 0;
3729}
3730
/* seq_file iteration over registered tracers ("available_tracers") */
static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
3737
3738static int show_traces_open(struct inode *inode, struct file *file)
3739{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003740 struct trace_array *tr = inode->i_private;
3741 struct seq_file *m;
3742 int ret;
3743
Steven Rostedt60a11772008-05-12 21:20:44 +02003744 if (tracing_disabled)
3745 return -ENODEV;
3746
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003747 ret = seq_open(file, &show_traces_seq_ops);
3748 if (ret)
3749 return ret;
3750
3751 m = file->private_data;
3752 m->private = tr;
3753
3754 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003755}
3756
/*
 * Writes to the "trace" file are accepted but discarded; the only write
 * side effect is the O_TRUNC buffer erase performed in tracing_open().
 * Report the full count so callers see a successful write.
 */
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}
3763
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003764loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003765{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003766 int ret;
3767
Slava Pestov364829b2010-11-24 15:13:16 -08003768 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003769 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003770 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003771 file->f_pos = ret = 0;
3772
3773 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003774}
3775
/* File operations for the "trace" file (static buffer contents) */
static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};
3783
/* File operations for "available_tracers" */
static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};
3790
/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 * Shared scratch space — protected by tracing_cpumask_update_lock.
 */
static char mask_str[NR_CPUS + 1];
3802
Ingo Molnarc7078de2008-05-12 21:20:52 +02003803static ssize_t
3804tracing_cpumask_read(struct file *filp, char __user *ubuf,
3805 size_t count, loff_t *ppos)
3806{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003807 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003808 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003809
3810 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003811
Tejun Heo1a402432015-02-13 14:37:39 -08003812 len = snprintf(mask_str, count, "%*pb\n",
3813 cpumask_pr_args(tr->tracing_cpumask));
3814 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003815 count = -EINVAL;
3816 goto out_err;
3817 }
Ingo Molnar36dfe922008-05-12 21:20:52 +02003818 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3819
3820out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003821 mutex_unlock(&tracing_cpumask_update_lock);
3822
3823 return count;
3824}
3825
/*
 * Write handler for "tracing_cpumask": parse a cpumask from user space
 * and install it on the trace_array.
 *
 * While flipping bits, recording must be quiesced per cpu: the
 * ->disabled counter and the ring buffer record-enable state are
 * adjusted under tr->max_lock with interrupts off so the transition
 * is atomic with respect to tracing on this CPU.
 */
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

	/* NOTE: no lock is held here — the label name is historical */
err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}
3876
/* File operations for "tracing_cpumask" */
static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};
3884
Li Zefanfdb372e2009-12-08 11:15:59 +08003885static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003886{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003887 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003888 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003889 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003890 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003891
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003892 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003893 tracer_flags = tr->current_trace->flags->val;
3894 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003895
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003896 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003897 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003898 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003899 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003900 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003901 }
3902
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003903 for (i = 0; trace_opts[i].name; i++) {
3904 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003905 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003906 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003907 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003908 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003909 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003910
Li Zefanfdb372e2009-12-08 11:15:59 +08003911 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003912}
3913
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003914static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003915 struct tracer_flags *tracer_flags,
3916 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003917{
Chunyu Hud39cdd22016-03-08 21:37:01 +08003918 struct tracer *trace = tracer_flags->trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003919 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003920
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003921 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003922 if (ret)
3923 return ret;
3924
3925 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003926 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003927 else
Zhaolei77708412009-08-07 18:53:21 +08003928 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003929 return 0;
3930}
3931
Li Zefan8d18eaa2009-12-08 11:17:06 +08003932/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003933static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003934{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003935 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003936 struct tracer_flags *tracer_flags = trace->flags;
3937 struct tracer_opt *opts = NULL;
3938 int i;
3939
3940 for (i = 0; tracer_flags->opts[i].name; i++) {
3941 opts = &tracer_flags->opts[i];
3942
3943 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003944 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003945 }
3946
3947 return -EINVAL;
3948}
3949
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003950/* Some tracers require overwrite to stay enabled */
3951int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3952{
3953 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3954 return -1;
3955
3956 return 0;
3957}
3958
/*
 * Set or clear one core trace option bit on @tr, giving the current
 * tracer a chance to veto the change, and propagating side effects for
 * the options that have them.  Returns 0 on success (including the
 * no-op case where the bit already has the requested value), or
 * -EINVAL if the tracer rejected the change.
 */
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		tr->trace_flags |= mask;
	else
		tr->trace_flags &= ~mask;

	/* Keep the cmdline recording machinery in sync with the option */
	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_EVENT_FORK)
		trace_event_follow_fork(tr, enabled);

	/* Overwrite mode must be pushed down into the ring buffer(s) */
	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK) {
		trace_printk_start_stop_comm(enabled);
		trace_printk_control(enabled);
	}

	return 0;
}
3995
/*
 * Parse one option token (optionally prefixed with "no" to negate it)
 * and apply it to @tr — first against the core trace_options table,
 * then against the current tracer's private options.
 *
 * NOTE: strstrip() may overwrite the first trailing whitespace byte of
 * @option with '\0'; that byte is restored to ' ' before returning so
 * the same boot-option buffer can be re-parsed later (see
 * apply_trace_boot_options()).
 */
static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;
	size_t orig_len = strlen(option);

	cmp = strstrip(option);

	/* A leading "no" clears the option instead of setting it */
	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	/*
	 * If the first trailing whitespace is replaced with '\0' by strstrip,
	 * turn it back into a space.
	 */
	if (orig_len > strlen(option))
		option[strlen(option)] = ' ';

	return ret;
}
4035
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004036static void __init apply_trace_boot_options(void)
4037{
4038 char *buf = trace_boot_options_buf;
4039 char *option;
4040
4041 while (true) {
4042 option = strsep(&buf, ",");
4043
4044 if (!option)
4045 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004046
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05004047 if (*option)
4048 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004049
4050 /* Put back the comma to allow this to be called again */
4051 if (buf)
4052 *(buf - 1) = ',';
4053 }
4054}
4055
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004056static ssize_t
4057tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4058 size_t cnt, loff_t *ppos)
4059{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004060 struct seq_file *m = filp->private_data;
4061 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004062 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004063 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004064
4065 if (cnt >= sizeof(buf))
4066 return -EINVAL;
4067
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08004068 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004069 return -EFAULT;
4070
Steven Rostedta8dd2172013-01-09 20:54:17 -05004071 buf[cnt] = 0;
4072
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004073 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004074 if (ret < 0)
4075 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004076
Jiri Olsacf8517c2009-10-23 19:36:16 -04004077 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004078
4079 return cnt;
4080}
4081
Li Zefanfdb372e2009-12-08 11:15:59 +08004082static int tracing_trace_options_open(struct inode *inode, struct file *file)
4083{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004084 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004085 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004086
Li Zefanfdb372e2009-12-08 11:15:59 +08004087 if (tracing_disabled)
4088 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004089
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004090 if (trace_array_get(tr) < 0)
4091 return -ENODEV;
4092
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004093 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4094 if (ret < 0)
4095 trace_array_put(tr);
4096
4097 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08004098}
4099
/* File operations for "trace_options" */
static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};
4107
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004108static const char readme_msg[] =
4109 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004110 "# echo 0 > tracing_on : quick way to disable tracing\n"
4111 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4112 " Important files:\n"
4113 " trace\t\t\t- The static contents of the buffer\n"
4114 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4115 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4116 " current_tracer\t- function and latency tracers\n"
4117 " available_tracers\t- list of configured tracers for current_tracer\n"
4118 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4119 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4120 " trace_clock\t\t-change the clock used to order events\n"
4121 " local: Per cpu clock but may not be synced across CPUs\n"
4122 " global: Synced across CPUs but slows tracing down.\n"
4123 " counter: Not a clock, but just an increment\n"
4124 " uptime: Jiffy counter from time of boot\n"
4125 " perf: Same clock that perf events use\n"
4126#ifdef CONFIG_X86_64
4127 " x86-tsc: TSC cycle counter\n"
4128#endif
4129 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4130 " tracing_cpumask\t- Limit which CPUs to trace\n"
4131 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4132 "\t\t\t Remove sub-buffer with rmdir\n"
4133 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004134 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4135 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004136 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004137#ifdef CONFIG_DYNAMIC_FTRACE
4138 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004139 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4140 "\t\t\t functions\n"
4141 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4142 "\t modules: Can select a group via module\n"
4143 "\t Format: :mod:<module-name>\n"
4144 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4145 "\t triggers: a command to perform when function is hit\n"
4146 "\t Format: <function>:<trigger>[:count]\n"
4147 "\t trigger: traceon, traceoff\n"
4148 "\t\t enable_event:<system>:<event>\n"
4149 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004150#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004151 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004152#endif
4153#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004154 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004155#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04004156 "\t\t dump\n"
4157 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004158 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4159 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4160 "\t The first one will disable tracing every time do_fault is hit\n"
4161 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4162 "\t The first time do trap is hit and it disables tracing, the\n"
4163 "\t counter will decrement to 2. If tracing is already disabled,\n"
4164 "\t the counter will not decrement. It only decrements when the\n"
4165 "\t trigger did work\n"
4166 "\t To remove trigger without count:\n"
4167 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4168 "\t To remove trigger with a count:\n"
4169 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004170 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004171 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4172 "\t modules: Can select a group via module command :mod:\n"
4173 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004174#endif /* CONFIG_DYNAMIC_FTRACE */
4175#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004176 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4177 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004178#endif
4179#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4180 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09004181 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004182 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4183#endif
4184#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004185 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4186 "\t\t\t snapshot buffer. Read the contents for more\n"
4187 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004188#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004189#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004190 " stack_trace\t\t- Shows the max stack trace when active\n"
4191 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004192 "\t\t\t Write into this file to reset the max size (trigger a\n"
4193 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004194#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004195 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4196 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004197#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004198#endif /* CONFIG_STACK_TRACER */
Masami Hiramatsu86425622016-08-18 17:58:15 +09004199#ifdef CONFIG_KPROBE_EVENT
4200 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4201 "\t\t\t Write into this file to define/undefine new trace events.\n"
4202#endif
4203#ifdef CONFIG_UPROBE_EVENT
4204 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4205 "\t\t\t Write into this file to define/undefine new trace events.\n"
4206#endif
4207#if defined(CONFIG_KPROBE_EVENT) || defined(CONFIG_UPROBE_EVENT)
4208 "\t accepts: event-definitions (one definition per line)\n"
4209 "\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
4210 "\t -:[<group>/]<event>\n"
4211#ifdef CONFIG_KPROBE_EVENT
4212 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4213#endif
4214#ifdef CONFIG_UPROBE_EVENT
4215 "\t place: <path>:<offset>\n"
4216#endif
4217 "\t args: <name>=fetcharg[:type]\n"
4218 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4219 "\t $stack<index>, $stack, $retval, $comm\n"
4220 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4221 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4222#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06004223 " events/\t\t- Directory containing all trace event subsystems:\n"
4224 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4225 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004226 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4227 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004228 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004229 " events/<system>/<event>/\t- Directory containing control files for\n"
4230 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004231 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4232 " filter\t\t- If set, only events passing filter are traced\n"
4233 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004234 "\t Format: <trigger>[:count][if <filter>]\n"
4235 "\t trigger: traceon, traceoff\n"
4236 "\t enable_event:<system>:<event>\n"
4237 "\t disable_event:<system>:<event>\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06004238#ifdef CONFIG_HIST_TRIGGERS
4239 "\t enable_hist:<system>:<event>\n"
4240 "\t disable_hist:<system>:<event>\n"
4241#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06004242#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004243 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004244#endif
4245#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004246 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004247#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004248#ifdef CONFIG_HIST_TRIGGERS
4249 "\t\t hist (see below)\n"
4250#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004251 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4252 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4253 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4254 "\t events/block/block_unplug/trigger\n"
4255 "\t The first disables tracing every time block_unplug is hit.\n"
4256 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4257 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4258 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4259 "\t Like function triggers, the counter is only decremented if it\n"
4260 "\t enabled or disabled tracing.\n"
4261 "\t To remove a trigger without a count:\n"
4262 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4263 "\t To remove a trigger with a count:\n"
4264 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4265 "\t Filters can be ignored when removing a trigger.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004266#ifdef CONFIG_HIST_TRIGGERS
4267 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
Tom Zanussi76a3b0c2016-03-03 12:54:44 -06004268 "\t Format: hist:keys=<field1[,field2,...]>\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004269 "\t [:values=<field1[,field2,...]>]\n"
Tom Zanussie62347d2016-03-03 12:54:45 -06004270 "\t [:sort=<field1[,field2,...]>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004271 "\t [:size=#entries]\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06004272 "\t [:pause][:continue][:clear]\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004273 "\t [:name=histname1]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004274 "\t [if <filter>]\n\n"
4275 "\t When a matching event is hit, an entry is added to a hash\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004276 "\t table using the key(s) and value(s) named, and the value of a\n"
4277 "\t sum called 'hitcount' is incremented. Keys and values\n"
4278 "\t correspond to fields in the event's format description. Keys\n"
Tom Zanussi69a02002016-03-03 12:54:52 -06004279 "\t can be any field, or the special string 'stacktrace'.\n"
4280 "\t Compound keys consisting of up to two fields can be specified\n"
4281 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4282 "\t fields. Sort keys consisting of up to two fields can be\n"
4283 "\t specified using the 'sort' keyword. The sort direction can\n"
4284 "\t be modified by appending '.descending' or '.ascending' to a\n"
4285 "\t sort field. The 'size' parameter can be used to specify more\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004286 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4287 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4288 "\t its histogram data will be shared with other triggers of the\n"
4289 "\t same name, and trigger hits will update this common data.\n\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004290 "\t Reading the 'hist' file for the event will dump the hash\n"
Tom Zanussi52a7f162016-03-03 12:54:57 -06004291 "\t table in its entirety to stdout. If there are multiple hist\n"
4292 "\t triggers attached to an event, there will be a table for each\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004293 "\t trigger in the output. The table displayed for a named\n"
4294 "\t trigger will be the same as any other instance having the\n"
4295 "\t same name. The default format used to display a given field\n"
4296 "\t can be modified by appending any of the following modifiers\n"
4297 "\t to the field name, as applicable:\n\n"
Tom Zanussic6afad42016-03-03 12:54:49 -06004298 "\t .hex display a number as a hex value\n"
4299 "\t .sym display an address as a symbol\n"
Tom Zanussi6b4827a2016-03-03 12:54:50 -06004300 "\t .sym-offset display an address as a symbol and offset\n"
Tom Zanussi31696192016-03-03 12:54:51 -06004301 "\t .execname display a common_pid as a program name\n"
4302 "\t .syscall display a syscall id as a syscall name\n\n"
Namhyung Kim4b94f5b2016-03-03 12:55:02 -06004303 "\t .log2 display log2 value rather than raw number\n\n"
Tom Zanussi83e99912016-03-03 12:54:46 -06004304 "\t The 'pause' parameter can be used to pause an existing hist\n"
4305 "\t trigger or to start a hist trigger but not log any events\n"
4306 "\t until told to do so. 'continue' can be used to start or\n"
4307 "\t restart a paused hist trigger.\n\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06004308 "\t The 'clear' parameter will clear the contents of a running\n"
4309 "\t hist trigger and leave its current paused/active state\n"
4310 "\t unchanged.\n\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06004311 "\t The enable_hist and disable_hist triggers can be used to\n"
4312 "\t have one event conditionally start and stop another event's\n"
4313 "\t already-attached hist trigger. The syntax is analagous to\n"
4314 "\t the enable_event and disable_event triggers.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004315#endif
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004316;
4317
4318static ssize_t
4319tracing_readme_read(struct file *filp, char __user *ubuf,
4320 size_t cnt, loff_t *ppos)
4321{
4322 return simple_read_from_buffer(ubuf, cnt, ppos,
4323 readme_msg, strlen(readme_msg));
4324}
4325
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004326static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02004327 .open = tracing_open_generic,
4328 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004329 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004330};
4331
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004332static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04004333{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004334 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004335
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004336 if (*pos || m->count)
4337 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004338
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004339 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004340
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004341 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4342 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004343 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04004344 continue;
4345
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004346 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004347 }
4348
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004349 return NULL;
4350}
Avadh Patel69abe6a2009-04-10 16:04:48 -04004351
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004352static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4353{
4354 void *v;
4355 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004356
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004357 preempt_disable();
4358 arch_spin_lock(&trace_cmdline_lock);
4359
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004360 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004361 while (l <= *pos) {
4362 v = saved_cmdlines_next(m, v, &l);
4363 if (!v)
4364 return NULL;
4365 }
4366
4367 return v;
4368}
4369
static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	/* Pairs with the lock/preempt_disable taken in saved_cmdlines_start() */
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
4375
4376static int saved_cmdlines_show(struct seq_file *m, void *v)
4377{
4378 char buf[TASK_COMM_LEN];
4379 unsigned int *pid = v;
4380
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004381 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004382 seq_printf(m, "%d %s\n", *pid, buf);
4383 return 0;
4384}
4385
/* seq_file iterator over the saved pid -> comm map ("saved_cmdlines") */
static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};
4392
4393static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4394{
4395 if (tracing_disabled)
4396 return -ENODEV;
4397
4398 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04004399}
4400
/* file_operations for the seq_file based "saved_cmdlines" tracefs file */
static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
4407
4408static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004409tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4410 size_t cnt, loff_t *ppos)
4411{
4412 char buf[64];
4413 int r;
Adrian Salido50b2d862017-04-18 11:44:33 -07004414 unsigned int n;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004415
Adrian Salido50b2d862017-04-18 11:44:33 -07004416 preempt_disable();
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004417 arch_spin_lock(&trace_cmdline_lock);
Adrian Salido50b2d862017-04-18 11:44:33 -07004418 n = savedcmd->cmdline_num;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004419 arch_spin_unlock(&trace_cmdline_lock);
Adrian Salido50b2d862017-04-18 11:44:33 -07004420 preempt_enable();
4421
4422 r = scnprintf(buf, sizeof(buf), "%u\n", n);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004423
4424 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4425}
4426
/* Free a saved_cmdlines_buffer and the three arrays hanging off it. */
static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s->map_cmdline_to_tgid);
	kfree(s);
}
4434
4435static int tracing_resize_saved_cmdlines(unsigned int val)
4436{
4437 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4438
Namhyung Kima6af8fb2014-06-10 16:11:35 +09004439 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004440 if (!s)
4441 return -ENOMEM;
4442
4443 if (allocate_cmdlines_buffer(val, s) < 0) {
4444 kfree(s);
4445 return -ENOMEM;
4446 }
4447
Adrian Salido50b2d862017-04-18 11:44:33 -07004448 preempt_disable();
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004449 arch_spin_lock(&trace_cmdline_lock);
4450 savedcmd_temp = savedcmd;
4451 savedcmd = s;
4452 arch_spin_unlock(&trace_cmdline_lock);
Adrian Salido50b2d862017-04-18 11:44:33 -07004453 preempt_enable();
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004454 free_saved_cmdlines_buffer(savedcmd_temp);
4455
4456 return 0;
4457}
4458
4459static ssize_t
4460tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4461 size_t cnt, loff_t *ppos)
4462{
4463 unsigned long val;
4464 int ret;
4465
4466 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4467 if (ret)
4468 return ret;
4469
4470 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
4471 if (!val || val > PID_MAX_DEFAULT)
4472 return -EINVAL;
4473
4474 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4475 if (ret < 0)
4476 return ret;
4477
4478 *ppos += cnt;
4479
4480 return cnt;
4481}
4482
/* Read/write control file "saved_cmdlines_size" (resizes the pid->comm map) */
static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};
4488
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004489#ifdef CONFIG_TRACE_ENUM_MAP_FILE
4490static union trace_enum_map_item *
4491update_enum_map(union trace_enum_map_item *ptr)
4492{
4493 if (!ptr->map.enum_string) {
4494 if (ptr->tail.next) {
4495 ptr = ptr->tail.next;
4496 /* Set ptr to the next real item (skip head) */
4497 ptr++;
4498 } else
4499 return NULL;
4500 }
4501 return ptr;
4502}
4503
4504static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4505{
4506 union trace_enum_map_item *ptr = v;
4507
4508 /*
4509 * Paranoid! If ptr points to end, we don't want to increment past it.
4510 * This really should never happen.
4511 */
4512 ptr = update_enum_map(ptr);
4513 if (WARN_ON_ONCE(!ptr))
4514 return NULL;
4515
4516 ptr++;
4517
4518 (*pos)++;
4519
4520 ptr = update_enum_map(ptr);
4521
4522 return ptr;
4523}
4524
4525static void *enum_map_start(struct seq_file *m, loff_t *pos)
4526{
4527 union trace_enum_map_item *v;
4528 loff_t l = 0;
4529
4530 mutex_lock(&trace_enum_mutex);
4531
4532 v = trace_enum_maps;
4533 if (v)
4534 v++;
4535
4536 while (v && l < *pos) {
4537 v = enum_map_next(m, v, &l);
4538 }
4539
4540 return v;
4541}
4542
static void enum_map_stop(struct seq_file *m, void *v)
{
	/* Pairs with the mutex_lock() taken in enum_map_start(). */
	mutex_unlock(&trace_enum_mutex);
}
4547
4548static int enum_map_show(struct seq_file *m, void *v)
4549{
4550 union trace_enum_map_item *ptr = v;
4551
4552 seq_printf(m, "%s %ld (%s)\n",
4553 ptr->map.enum_string, ptr->map.enum_value,
4554 ptr->map.system);
4555
4556 return 0;
4557}
4558
/* seq_file iterator over the chained enum map arrays ("enum_map" file) */
static const struct seq_operations tracing_enum_map_seq_ops = {
	.start		= enum_map_start,
	.next		= enum_map_next,
	.stop		= enum_map_stop,
	.show		= enum_map_show,
};
4565
4566static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4567{
4568 if (tracing_disabled)
4569 return -ENODEV;
4570
4571 return seq_open(filp, &tracing_enum_map_seq_ops);
4572}
4573
/* file_operations for the seq_file based "enum_map" tracefs file */
static const struct file_operations tracing_enum_map_fops = {
	.open		= tracing_enum_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
4580
static inline union trace_enum_map_item *
trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
{
	/* Return tail of array given the head */
	/* layout: [head][length map items][tail], so tail = head + length + 1 */
	return ptr + ptr->head.length + 1;
}
4587
4588static void
4589trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4590 int len)
4591{
4592 struct trace_enum_map **stop;
4593 struct trace_enum_map **map;
4594 union trace_enum_map_item *map_array;
4595 union trace_enum_map_item *ptr;
4596
4597 stop = start + len;
4598
4599 /*
4600 * The trace_enum_maps contains the map plus a head and tail item,
4601 * where the head holds the module and length of array, and the
4602 * tail holds a pointer to the next list.
4603 */
4604 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4605 if (!map_array) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07004606 pr_warn("Unable to allocate trace enum mapping\n");
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004607 return;
4608 }
4609
4610 mutex_lock(&trace_enum_mutex);
4611
4612 if (!trace_enum_maps)
4613 trace_enum_maps = map_array;
4614 else {
4615 ptr = trace_enum_maps;
4616 for (;;) {
4617 ptr = trace_enum_jmp_to_tail(ptr);
4618 if (!ptr->tail.next)
4619 break;
4620 ptr = ptr->tail.next;
4621
4622 }
4623 ptr->tail.next = map_array;
4624 }
4625 map_array->head.mod = mod;
4626 map_array->head.length = len;
4627 map_array++;
4628
4629 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4630 map_array->map = **map;
4631 map_array++;
4632 }
4633 memset(map_array, 0, sizeof(*map_array));
4634
4635 mutex_unlock(&trace_enum_mutex);
4636}
4637
/* Create the world-readable "enum_map" file under @d_tracer. */
static void trace_create_enum_file(struct dentry *d_tracer)
{
	trace_create_file("enum_map", 0444, d_tracer,
			  NULL, &tracing_enum_map_fops);
}
4643
4644#else /* CONFIG_TRACE_ENUM_MAP_FILE */
/* Stubs when CONFIG_TRACE_ENUM_MAP_FILE is off: no "enum_map" file exists. */
static inline void trace_create_enum_file(struct dentry *d_tracer) { }
static inline void trace_insert_enum_map_file(struct module *mod,
		  struct trace_enum_map **start, int len) { }
4648#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4649
/* Register @len enum maps from @mod with the event code and the map file. */
static void trace_insert_enum_map(struct module *mod,
				  struct trace_enum_map **start, int len)
{
	/* Nothing to register for an empty map array. */
	if (len <= 0)
		return;

	/* Teach the event print code the new enum values ... */
	trace_event_enum_update(start, len);

	/* ... and expose them via the "enum_map" file, if configured. */
	trace_insert_enum_map_file(mod, start, len);
}
4664
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004665static ssize_t
Jamie Gennis13b625d2012-11-21 15:04:25 -08004666tracing_saved_tgids_read(struct file *file, char __user *ubuf,
4667 size_t cnt, loff_t *ppos)
4668{
4669 char *file_buf;
4670 char *buf;
4671 int len = 0;
Jamie Gennis13b625d2012-11-21 15:04:25 -08004672 int i;
Adrian Salido50b2d862017-04-18 11:44:33 -07004673 int *pids;
4674 int n = 0;
Jamie Gennis13b625d2012-11-21 15:04:25 -08004675
Adrian Salido50b2d862017-04-18 11:44:33 -07004676 preempt_disable();
4677 arch_spin_lock(&trace_cmdline_lock);
4678
4679 pids = kmalloc_array(savedcmd->cmdline_num, 2*sizeof(int), GFP_KERNEL);
4680 if (!pids) {
4681 arch_spin_unlock(&trace_cmdline_lock);
4682 preempt_enable();
Jamie Gennis13b625d2012-11-21 15:04:25 -08004683 return -ENOMEM;
Adrian Salido50b2d862017-04-18 11:44:33 -07004684 }
Jamie Gennis13b625d2012-11-21 15:04:25 -08004685
Adrian Salido50b2d862017-04-18 11:44:33 -07004686 for (i = 0; i < savedcmd->cmdline_num; i++) {
4687 int pid;
Jamie Gennis13b625d2012-11-21 15:04:25 -08004688
Dmitry Shmidtcb575f72015-10-28 10:45:04 -07004689 pid = savedcmd->map_cmdline_to_pid[i];
Jamie Gennis13b625d2012-11-21 15:04:25 -08004690 if (pid == -1 || pid == NO_CMDLINE_MAP)
4691 continue;
4692
Adrian Salido50b2d862017-04-18 11:44:33 -07004693 pids[n] = pid;
4694 pids[n+1] = __find_tgid_locked(pid);
4695 n += 2;
4696 }
4697 arch_spin_unlock(&trace_cmdline_lock);
4698 preempt_enable();
4699
4700 if (n == 0) {
4701 kfree(pids);
4702 return 0;
4703 }
4704
4705 /* enough to hold max pair of pids + space, lr and nul */
4706 len = n * 12;
4707 file_buf = kmalloc(len, GFP_KERNEL);
4708 if (!file_buf) {
4709 kfree(pids);
4710 return -ENOMEM;
4711 }
4712
4713 buf = file_buf;
4714 for (i = 0; i < n && len > 0; i += 2) {
4715 int r;
4716
4717 r = snprintf(buf, len, "%d %d\n", pids[i], pids[i+1]);
Jamie Gennis13b625d2012-11-21 15:04:25 -08004718 buf += r;
Adrian Salido50b2d862017-04-18 11:44:33 -07004719 len -= r;
Jamie Gennis13b625d2012-11-21 15:04:25 -08004720 }
4721
4722 len = simple_read_from_buffer(ubuf, cnt, ppos,
Adrian Salido50b2d862017-04-18 11:44:33 -07004723 file_buf, buf - file_buf);
Jamie Gennis13b625d2012-11-21 15:04:25 -08004724
4725 kfree(file_buf);
Adrian Salido50b2d862017-04-18 11:44:33 -07004726 kfree(pids);
Jamie Gennis13b625d2012-11-21 15:04:25 -08004727
4728 return len;
4729}
4730
/* file_operations for the read-only "saved_tgids" tracefs file */
static const struct file_operations tracing_saved_tgids_fops = {
	.open	= tracing_open_generic,
	.read	= tracing_saved_tgids_read,
	.llseek	= generic_file_llseek,
};
4736
4737static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004738tracing_set_trace_read(struct file *filp, char __user *ubuf,
4739 size_t cnt, loff_t *ppos)
4740{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004741 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004742 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004743 int r;
4744
4745 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004746 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004747 mutex_unlock(&trace_types_lock);
4748
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004749 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004750}
4751
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004752int tracer_init(struct tracer *t, struct trace_array *tr)
4753{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004754 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004755 return t->init(tr);
4756}
4757
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004758static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004759{
4760 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05004761
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004762 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004763 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004764}
4765
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004766#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09004767/* resize @tr's buffer to the size of @size_tr's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004768static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4769 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09004770{
4771 int cpu, ret = 0;
4772
4773 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4774 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004775 ret = ring_buffer_resize(trace_buf->buffer,
4776 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004777 if (ret < 0)
4778 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004779 per_cpu_ptr(trace_buf->data, cpu)->entries =
4780 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09004781 }
4782 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004783 ret = ring_buffer_resize(trace_buf->buffer,
4784 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004785 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004786 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4787 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09004788 }
4789
4790 return ret;
4791}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004792#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09004793
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004794static int __tracing_resize_ring_buffer(struct trace_array *tr,
4795 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04004796{
4797 int ret;
4798
4799 /*
4800 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04004801 * we use the size that was given, and we can forget about
4802 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04004803 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05004804 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04004805
Steven Rostedtb382ede62012-10-10 21:44:34 -04004806 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004807 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04004808 return 0;
4809
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004810 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004811 if (ret < 0)
4812 return ret;
4813
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004814#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004815 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4816 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004817 goto out;
4818
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004819 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004820 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004821 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4822 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004823 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04004824 /*
4825 * AARGH! We are left with different
4826 * size max buffer!!!!
4827 * The max buffer is our "snapshot" buffer.
4828 * When a tracer needs a snapshot (one of the
4829 * latency tracers), it swaps the max buffer
4830 * with the saved snap shot. We succeeded to
4831 * update the size of the main buffer, but failed to
4832 * update the size of the max buffer. But when we tried
4833 * to reset the main buffer to the original size, we
4834 * failed there too. This is very unlikely to
4835 * happen, but if it does, warn and kill all
4836 * tracing.
4837 */
Steven Rostedt73c51622009-03-11 13:42:01 -04004838 WARN_ON(1);
4839 tracing_disabled = 1;
4840 }
4841 return ret;
4842 }
4843
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004844 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004845 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004846 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004847 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004848
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004849 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004850#endif /* CONFIG_TRACER_MAX_TRACE */
4851
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004852 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004853 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004854 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004855 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04004856
4857 return ret;
4858}
4859
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004860static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4861 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004862{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07004863 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004864
4865 mutex_lock(&trace_types_lock);
4866
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004867 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4868 /* make sure, this cpu is enabled in the mask */
4869 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4870 ret = -EINVAL;
4871 goto out;
4872 }
4873 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004874
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004875 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004876 if (ret < 0)
4877 ret = -ENOMEM;
4878
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004879out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004880 mutex_unlock(&trace_types_lock);
4881
4882 return ret;
4883}
4884
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004885
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004886/**
4887 * tracing_update_buffers - used by tracing facility to expand ring buffers
4888 *
4889 * To save on memory when the tracing is never used on a system with it
4890 * configured in. The ring buffers are set to a minimum size. But once
4891 * a user starts to use the tracing facility, then they need to grow
4892 * to their default size.
4893 *
4894 * This function is to be called when a tracer is about to be used.
4895 */
4896int tracing_update_buffers(void)
4897{
4898 int ret = 0;
4899
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004900 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004901 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004902 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004903 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004904 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004905
4906 return ret;
4907}
4908
Steven Rostedt577b7852009-02-26 23:43:05 -05004909struct trace_option_dentry;
4910
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04004911static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004912create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004913
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004914/*
4915 * Used to clear out the tracer before deletion of an instance.
4916 * Must have trace_types_lock held.
4917 */
4918static void tracing_set_nop(struct trace_array *tr)
4919{
4920 if (tr->current_trace == &nop_trace)
4921 return;
4922
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004923 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004924
4925 if (tr->current_trace->reset)
4926 tr->current_trace->reset(tr);
4927
4928 tr->current_trace = &nop_trace;
4929}
4930
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04004931static void add_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004932{
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05004933 /* Only enable if the directory has been created already. */
4934 if (!tr->dir)
4935 return;
4936
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04004937 create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05004938}
4939
/*
 * Switch the trace array @tr to the tracer named @buf.
 *
 * Looks the name up in the registered trace_types list, tears down the
 * current tracer (freeing its snapshot buffer if the new tracer does not
 * use one), then initializes and installs the new tracer.
 *
 * Returns 0 on success, -EINVAL for an unknown or not-permitted tracer,
 * -EBUSY while trace_pipe readers hold a reference, or a negative errno
 * from buffer allocation / tracer init.
 */
static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	/* Tracers need full-size buffers; grow from the boot-time minimum. */
	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	/* Linear search of the registered tracers by name. */
	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	/* Selecting the already-active tracer is a no-op, not an error. */
	if (t == tr->current_trace)
		goto out;

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called from interrupts disabled
		 * so a synchronize_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}
#endif

#ifdef CONFIG_TRACER_MAX_TRACE
	/* The new tracer needs a snapshot buffer it does not yet have. */
	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
5029
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005030static ssize_t
5031tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5032 size_t cnt, loff_t *ppos)
5033{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005034 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08005035 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005036 int i;
5037 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005038 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005039
Steven Rostedt60063a62008-10-28 10:44:24 -04005040 ret = cnt;
5041
Li Zefanee6c2c12009-09-18 14:06:47 +08005042 if (cnt > MAX_TRACER_SIZE)
5043 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005044
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08005045 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005046 return -EFAULT;
5047
5048 buf[cnt] = 0;
5049
5050 /* strip ending whitespace. */
5051 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5052 buf[i] = 0;
5053
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005054 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005055 if (err)
5056 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005057
Jiri Olsacf8517c2009-10-23 19:36:16 -04005058 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005059
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005060 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005061}
5062
5063static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005064tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5065 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005066{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005067 char buf[64];
5068 int r;
5069
Steven Rostedtcffae432008-05-12 21:21:00 +02005070 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005071 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02005072 if (r > sizeof(buf))
5073 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005074 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005075}
5076
5077static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005078tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5079 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005080{
Hannes Eder5e398412009-02-10 19:44:34 +01005081 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005082 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005083
Peter Huewe22fe9b52011-06-07 21:58:27 +02005084 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5085 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005086 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005087
5088 *ptr = val * 1000;
5089
5090 return cnt;
5091}
5092
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005093static ssize_t
5094tracing_thresh_read(struct file *filp, char __user *ubuf,
5095 size_t cnt, loff_t *ppos)
5096{
5097 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5098}
5099
5100static ssize_t
5101tracing_thresh_write(struct file *filp, const char __user *ubuf,
5102 size_t cnt, loff_t *ppos)
5103{
5104 struct trace_array *tr = filp->private_data;
5105 int ret;
5106
5107 mutex_lock(&trace_types_lock);
5108 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5109 if (ret < 0)
5110 goto out;
5111
5112 if (tr->current_trace->update_thresh) {
5113 ret = tr->current_trace->update_thresh(tr);
5114 if (ret < 0)
5115 goto out;
5116 }
5117
5118 ret = cnt;
5119out:
5120 mutex_unlock(&trace_types_lock);
5121
5122 return ret;
5123}
5124
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)

/*
 * Read handler for "tracing_max_latency": the per-array max latency is
 * the file's private_data and uses the common nsecs<->usecs helpers.
 */
static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}

/* Write handler for "tracing_max_latency"; value given in microseconds. */
static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}

#endif
5142
Steven Rostedtb3806b42008-05-12 21:20:46 +02005143static int tracing_open_pipe(struct inode *inode, struct file *filp)
5144{
Oleg Nesterov15544202013-07-23 17:25:57 +02005145 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005146 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005147 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005148
5149 if (tracing_disabled)
5150 return -ENODEV;
5151
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005152 if (trace_array_get(tr) < 0)
5153 return -ENODEV;
5154
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005155 mutex_lock(&trace_types_lock);
5156
Steven Rostedtb3806b42008-05-12 21:20:46 +02005157 /* create a buffer to store the information to pass to userspace */
5158 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005159 if (!iter) {
5160 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005161 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005162 goto out;
5163 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005164
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04005165 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005166 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005167
5168 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5169 ret = -ENOMEM;
5170 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10305171 }
5172
Steven Rostedta3097202008-11-07 22:36:02 -05005173 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10305174 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05005175
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005176 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04005177 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5178
David Sharp8be07092012-11-13 12:18:22 -08005179 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005180 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08005181 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5182
Oleg Nesterov15544202013-07-23 17:25:57 +02005183 iter->tr = tr;
5184 iter->trace_buffer = &tr->trace_buffer;
5185 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005186 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005187 filp->private_data = iter;
5188
Steven Rostedt107bad82008-05-12 21:21:01 +02005189 if (iter->trace->pipe_open)
5190 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02005191
Arnd Bergmannb4447862010-07-07 23:40:11 +02005192 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005193
5194 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005195out:
5196 mutex_unlock(&trace_types_lock);
5197 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005198
5199fail:
5200 kfree(iter->trace);
5201 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005202 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005203 mutex_unlock(&trace_types_lock);
5204 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005205}
5206
/*
 * Release handler for "trace_pipe": drop the pipe reference on the
 * current tracer (re-enabling tracer switching), let the tracer clean
 * up, then free the iterator and unpin the trace array.
 */
static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	/* Paired with the ref++ taken in tracing_open_pipe(). */
	tr->current_trace->ref--;

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter);

	/* Drop the trace_array reference taken at open time. */
	trace_array_put(tr);

	return 0;
}
5229
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005230static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005231trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005232{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005233 struct trace_array *tr = iter->tr;
5234
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005235 /* Iterators are static, they should be filled or empty */
5236 if (trace_buffer_iter(iter, iter->cpu_file))
5237 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005238
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005239 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005240 /*
5241 * Always select as readable when in blocking mode
5242 */
5243 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005244 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005245 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005246 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005247}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005248
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005249static unsigned int
5250tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5251{
5252 struct trace_iterator *iter = filp->private_data;
5253
5254 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005255}
5256
/* Must be called with iter->mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	/*
	 * Loop until there is something to read.  Returns 1 when data is
	 * (probably) available, 0-or-negative to make the caller bail.
	 */
	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracing_is_on() && iter->pos)
			break;

		/* Drop the iterator lock while sleeping so others can progress. */
		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, false);

		mutex_lock(&iter->mutex);

		/* Non-zero means the wait itself failed — propagate it. */
		if (ret)
			return ret;
	}

	return 1;
}
5293
/*
 * Consumer reader.
 *
 * Read handler for "trace_pipe": formats trace entries into iter->seq
 * and copies them to userspace, consuming each entry as it goes.  Any
 * formatted data that did not fit in a previous read is flushed first.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	ssize_t sret;

	/*
	 * Avoid more than one consumer on a single file descriptor
	 * This is just a matter of traces coherency, the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		goto out;

	trace_seq_init(&iter->seq);

	/* A tracer may override the whole read with its own handler. */
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	/* Never format more than one page worth per pass. */
	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.seq.len = save_len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}
5391
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005392static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5393 unsigned int idx)
5394{
5395 __free_page(spd->pages[idx]);
5396}
5397
/*
 * Pipe buffer operations for pages spliced out of trace_pipe.  The
 * pages are one-shot allocations released by tracing_spd_release_pipe()
 * or the generic release, so the generic pipe helpers suffice; merging
 * into a previous buffer is disabled.
 */
static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge = 0,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
5405
/*
 * Format trace lines into iter->seq until the (page-sized) seq buffer
 * fills or @rem bytes have been produced, consuming entries as they are
 * fully formatted.  On any partial line the seq length is rolled back
 * to save_len so no truncated line is ever emitted.
 *
 * Returns the number of bytes still wanted (0 tells the caller to stop).
 * Called by tracing_splice_read_pipe() with iter->mutex and the trace
 * access locks held.
 */
static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		/* Bytes this entry added; stop (and roll back) if over budget. */
		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			/* Buffer exhausted: nothing more to splice. */
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}
5452
/*
 * splice_read handler for "trace_pipe": formats consumed trace entries
 * page by page (via tracing_fill_pipe_page()) and hands the pages to
 * the pipe with splice_to_pipe().  Behaves like tracing_read_pipe() but
 * avoids the copy to a user buffer.
 */
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages = pages_def,
		.partial = partial_def,
		.nr_pages = 0, /* This gets updated below. */
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &tracing_pipe_buf_ops,
		.spd_release = tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	mutex_lock(&iter->mutex);

	/* A tracer may provide its own splice implementation. */
	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	/* Only the pages actually filled are handed to the pipe. */
	spd.nr_pages = i;

	if (i)
		ret = splice_to_pipe(pipe, &spd);
	else
		ret = 0;
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}
5539
Steven Rostedta98a3c32008-05-12 21:20:59 +02005540static ssize_t
5541tracing_entries_read(struct file *filp, char __user *ubuf,
5542 size_t cnt, loff_t *ppos)
5543{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005544 struct inode *inode = file_inode(filp);
5545 struct trace_array *tr = inode->i_private;
5546 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005547 char buf[64];
5548 int r = 0;
5549 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005550
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005551 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005552
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005553 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005554 int cpu, buf_size_same;
5555 unsigned long size;
5556
5557 size = 0;
5558 buf_size_same = 1;
5559 /* check if all cpu sizes are same */
5560 for_each_tracing_cpu(cpu) {
5561 /* fill in the size from first enabled cpu */
5562 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005563 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5564 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005565 buf_size_same = 0;
5566 break;
5567 }
5568 }
5569
5570 if (buf_size_same) {
5571 if (!ring_buffer_expanded)
5572 r = sprintf(buf, "%lu (expanded: %lu)\n",
5573 size >> 10,
5574 trace_buf_size >> 10);
5575 else
5576 r = sprintf(buf, "%lu\n", size >> 10);
5577 } else
5578 r = sprintf(buf, "X\n");
5579 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005580 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005581
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005582 mutex_unlock(&trace_types_lock);
5583
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005584 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5585 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005586}
5587
5588static ssize_t
5589tracing_entries_write(struct file *filp, const char __user *ubuf,
5590 size_t cnt, loff_t *ppos)
5591{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005592 struct inode *inode = file_inode(filp);
5593 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005594 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005595 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005596
Peter Huewe22fe9b52011-06-07 21:58:27 +02005597 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5598 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005599 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005600
5601 /* must have at least 1 entry */
5602 if (!val)
5603 return -EINVAL;
5604
Steven Rostedt1696b2b2008-11-13 00:09:35 -05005605 /* value is in KB */
5606 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005607 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005608 if (ret < 0)
5609 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005610
Jiri Olsacf8517c2009-10-23 19:36:16 -04005611 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005612
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005613 return cnt;
5614}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05005615
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005616static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005617tracing_total_entries_read(struct file *filp, char __user *ubuf,
5618 size_t cnt, loff_t *ppos)
5619{
5620 struct trace_array *tr = filp->private_data;
5621 char buf[64];
5622 int r, cpu;
5623 unsigned long size = 0, expanded_size = 0;
5624
5625 mutex_lock(&trace_types_lock);
5626 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005627 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005628 if (!ring_buffer_expanded)
5629 expanded_size += trace_buf_size >> 10;
5630 }
5631 if (ring_buffer_expanded)
5632 r = sprintf(buf, "%lu\n", size);
5633 else
5634 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5635 mutex_unlock(&trace_types_lock);
5636
5637 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5638}
5639
5640static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005641tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5642 size_t cnt, loff_t *ppos)
5643{
5644 /*
5645 * There is no need to read what the user has written, this function
5646 * is just to make sure that there is no error when "echo" is used
5647 */
5648
5649 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005650
5651 return cnt;
5652}
5653
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005654static int
5655tracing_free_buffer_release(struct inode *inode, struct file *filp)
5656{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005657 struct trace_array *tr = inode->i_private;
5658
Steven Rostedtcf30cf62011-06-14 22:44:07 -04005659 /* disable tracing ? */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005660 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07005661 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005662 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005663 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005664
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005665 trace_array_put(tr);
5666
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005667 return 0;
5668}
5669
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005670static ssize_t
5671tracing_mark_write(struct file *filp, const char __user *ubuf,
5672 size_t cnt, loff_t *fpos)
5673{
Steven Rostedtd696b582011-09-22 11:50:27 -04005674 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07005675 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04005676 struct ring_buffer_event *event;
5677 struct ring_buffer *buffer;
5678 struct print_entry *entry;
5679 unsigned long irq_flags;
5680 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005681 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04005682 int nr_pages = 1;
5683 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04005684 int offset;
5685 int size;
5686 int len;
5687 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005688 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005689
Steven Rostedtc76f0692008-11-07 22:36:02 -05005690 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005691 return -EINVAL;
5692
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005693 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07005694 return -EINVAL;
5695
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005696 if (cnt > TRACE_BUF_SIZE)
5697 cnt = TRACE_BUF_SIZE;
5698
Steven Rostedtd696b582011-09-22 11:50:27 -04005699 /*
5700 * Userspace is injecting traces into the kernel trace buffer.
5701 * We want to be as non intrusive as possible.
5702 * To do so, we do not want to allocate any special buffers
5703 * or take any locks, but instead write the userspace data
5704 * straight into the ring buffer.
5705 *
5706 * First we need to pin the userspace buffer into memory,
5707 * which, most likely it is, because it just referenced it.
5708 * But there's no guarantee that it is. By using get_user_pages_fast()
5709 * and kmap_atomic/kunmap_atomic() we can get access to the
5710 * pages directly. We then write the data directly into the
5711 * ring buffer.
5712 */
5713 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005714
Steven Rostedtd696b582011-09-22 11:50:27 -04005715 /* check if we cross pages */
5716 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5717 nr_pages = 2;
5718
5719 offset = addr & (PAGE_SIZE - 1);
5720 addr &= PAGE_MASK;
5721
5722 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5723 if (ret < nr_pages) {
5724 while (--ret >= 0)
5725 put_page(pages[ret]);
5726 written = -EFAULT;
5727 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005728 }
5729
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005730 for (i = 0; i < nr_pages; i++)
5731 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04005732
5733 local_save_flags(irq_flags);
5734 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07005735 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04005736 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5737 irq_flags, preempt_count());
5738 if (!event) {
5739 /* Ring buffer disabled, return as if not open for write */
5740 written = -EBADF;
5741 goto out_unlock;
5742 }
5743
5744 entry = ring_buffer_event_data(event);
5745 entry->ip = _THIS_IP_;
5746
5747 if (nr_pages == 2) {
5748 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005749 memcpy(&entry->buf, map_page[0] + offset, len);
5750 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04005751 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005752 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04005753
5754 if (entry->buf[cnt - 1] != '\n') {
5755 entry->buf[cnt] = '\n';
5756 entry->buf[cnt + 1] = '\0';
Shashank Mittal43beb422016-05-20 13:06:09 -07005757 stm_log(OST_ENTITY_TRACE_MARKER, entry->buf, cnt + 2);
5758 } else {
Steven Rostedtd696b582011-09-22 11:50:27 -04005759 entry->buf[cnt] = '\0';
Shashank Mittal43beb422016-05-20 13:06:09 -07005760 stm_log(OST_ENTITY_TRACE_MARKER, entry->buf, cnt + 1);
5761 }
Steven Rostedtd696b582011-09-22 11:50:27 -04005762
Steven Rostedt7ffbd482012-10-11 12:14:25 -04005763 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04005764
5765 written = cnt;
5766
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02005767 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005768
Steven Rostedtd696b582011-09-22 11:50:27 -04005769 out_unlock:
Vikram Mulukutla72158532014-12-17 18:50:56 -08005770 for (i = nr_pages - 1; i >= 0; i--) {
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005771 kunmap_atomic(map_page[i]);
5772 put_page(pages[i]);
5773 }
Steven Rostedtd696b582011-09-22 11:50:27 -04005774 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02005775 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005776}
5777
Li Zefan13f16d22009-12-08 11:16:11 +08005778static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08005779{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005780 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08005781 int i;
5782
5783 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08005784 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08005785 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005786 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5787 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08005788 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08005789
Li Zefan13f16d22009-12-08 11:16:11 +08005790 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08005791}
5792
Steven Rostedte1e232c2014-02-10 23:38:46 -05005793static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08005794{
Zhaolei5079f322009-08-25 16:12:56 +08005795 int i;
5796
Zhaolei5079f322009-08-25 16:12:56 +08005797 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5798 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5799 break;
5800 }
5801 if (i == ARRAY_SIZE(trace_clocks))
5802 return -EINVAL;
5803
Zhaolei5079f322009-08-25 16:12:56 +08005804 mutex_lock(&trace_types_lock);
5805
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005806 tr->clock_id = i;
5807
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005808 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08005809
David Sharp60303ed2012-10-11 16:27:52 -07005810 /*
5811 * New clock may not be consistent with the previous clock.
5812 * Reset the buffer so that it doesn't have incomparable timestamps.
5813 */
Alexander Z Lam94571582013-08-02 18:36:16 -07005814 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005815
5816#ifdef CONFIG_TRACER_MAX_TRACE
5817 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5818 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07005819 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005820#endif
David Sharp60303ed2012-10-11 16:27:52 -07005821
Zhaolei5079f322009-08-25 16:12:56 +08005822 mutex_unlock(&trace_types_lock);
5823
Steven Rostedte1e232c2014-02-10 23:38:46 -05005824 return 0;
5825}
5826
5827static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5828 size_t cnt, loff_t *fpos)
5829{
5830 struct seq_file *m = filp->private_data;
5831 struct trace_array *tr = m->private;
5832 char buf[64];
5833 const char *clockstr;
5834 int ret;
5835
5836 if (cnt >= sizeof(buf))
5837 return -EINVAL;
5838
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08005839 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05005840 return -EFAULT;
5841
5842 buf[cnt] = 0;
5843
5844 clockstr = strstrip(buf);
5845
5846 ret = tracing_set_clock(tr, clockstr);
5847 if (ret)
5848 return ret;
5849
Zhaolei5079f322009-08-25 16:12:56 +08005850 *fpos += cnt;
5851
5852 return cnt;
5853}
5854
Li Zefan13f16d22009-12-08 11:16:11 +08005855static int tracing_clock_open(struct inode *inode, struct file *file)
5856{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005857 struct trace_array *tr = inode->i_private;
5858 int ret;
5859
Li Zefan13f16d22009-12-08 11:16:11 +08005860 if (tracing_disabled)
5861 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005862
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005863 if (trace_array_get(tr))
5864 return -ENODEV;
5865
5866 ret = single_open(file, tracing_clock_show, inode->i_private);
5867 if (ret < 0)
5868 trace_array_put(tr);
5869
5870 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08005871}
5872
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005873struct ftrace_buffer_info {
5874 struct trace_iterator iter;
5875 void *spare;
5876 unsigned int read;
5877};
5878
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005879#ifdef CONFIG_TRACER_SNAPSHOT
5880static int tracing_snapshot_open(struct inode *inode, struct file *file)
5881{
Oleg Nesterov6484c712013-07-23 17:26:10 +02005882 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005883 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005884 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005885 int ret = 0;
5886
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005887 if (trace_array_get(tr) < 0)
5888 return -ENODEV;
5889
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005890 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02005891 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005892 if (IS_ERR(iter))
5893 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005894 } else {
5895 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005896 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005897 m = kzalloc(sizeof(*m), GFP_KERNEL);
5898 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005899 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005900 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5901 if (!iter) {
5902 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005903 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005904 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005905 ret = 0;
5906
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005907 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02005908 iter->trace_buffer = &tr->max_buffer;
5909 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005910 m->private = iter;
5911 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005912 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005913out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005914 if (ret < 0)
5915 trace_array_put(tr);
5916
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005917 return ret;
5918}
5919
/*
 * Write handler for the snapshot file.  The value written selects the
 * action: 0 frees the snapshot buffer, 1 allocates it (if needed) and
 * swaps it with the live buffer, any other value clears the snapshot
 * contents.  Fails with -EBUSY while a tracer that itself uses the max
 * buffer is active.
 */
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	/* the current tracer owns the max buffer; don't fight over it */
	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		/* freeing is only meaningful for the all-cpus file */
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		/* allocate the snapshot buffer lazily on first use */
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		/* irqs off so the swap is atomic on this cpu */
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		/* any other value: just clear the snapshot contents */
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005993
5994static int tracing_snapshot_release(struct inode *inode, struct file *file)
5995{
5996 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005997 int ret;
5998
5999 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006000
6001 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006002 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006003
6004 /* If write only, the seq_file is just a stub */
6005 if (m)
6006 kfree(m->private);
6007 kfree(m);
6008
6009 return 0;
6010}
6011
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006012static int tracing_buffers_open(struct inode *inode, struct file *filp);
6013static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6014 size_t count, loff_t *ppos);
6015static int tracing_buffers_release(struct inode *inode, struct file *file);
6016static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6017 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6018
/*
 * Open the raw snapshot file: piggy-back on tracing_buffers_open() and
 * then retarget the resulting iterator at the snapshot (max) buffer.
 * Refused with -EBUSY when the current tracer uses the max buffer
 * itself.
 */
static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	/* tracing_buffers_open() installed this as private_data */
	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		/* undo the open; the tracer owns the max buffer */
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}
6040
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006041#endif /* CONFIG_TRACER_SNAPSHOT */
6042
6043
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006044static const struct file_operations tracing_thresh_fops = {
6045 .open = tracing_open_generic,
6046 .read = tracing_thresh_read,
6047 .write = tracing_thresh_write,
6048 .llseek = generic_file_llseek,
6049};
6050
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04006051#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006052static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006053 .open = tracing_open_generic,
6054 .read = tracing_max_lat_read,
6055 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006056 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006057};
Chen Gange428abb2015-11-10 05:15:15 +08006058#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006059
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006060static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006061 .open = tracing_open_generic,
6062 .read = tracing_set_trace_read,
6063 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006064 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006065};
6066
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006067static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006068 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006069 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006070 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006071 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006072 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006073 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02006074};
6075
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006076static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006077 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02006078 .read = tracing_entries_read,
6079 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006080 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006081 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02006082};
6083
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006084static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006085 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006086 .read = tracing_total_entries_read,
6087 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006088 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006089};
6090
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006091static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006092 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006093 .write = tracing_free_buffer_write,
6094 .release = tracing_free_buffer_release,
6095};
6096
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006097static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006098 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006099 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006100 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006101 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006102};
6103
Zhaolei5079f322009-08-25 16:12:56 +08006104static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08006105 .open = tracing_clock_open,
6106 .read = seq_read,
6107 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006108 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08006109 .write = tracing_clock_write,
6110};
6111
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006112#ifdef CONFIG_TRACER_SNAPSHOT
6113static const struct file_operations snapshot_fops = {
6114 .open = tracing_snapshot_open,
6115 .read = seq_read,
6116 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05006117 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006118 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006119};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006120
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006121static const struct file_operations snapshot_raw_fops = {
6122 .open = snapshot_raw_open,
6123 .read = tracing_buffers_read,
6124 .release = tracing_buffers_release,
6125 .splice_read = tracing_buffers_splice_read,
6126 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006127};
6128
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006129#endif /* CONFIG_TRACER_SNAPSHOT */
6130
Steven Rostedt2cadf912008-12-01 22:20:19 -05006131static int tracing_buffers_open(struct inode *inode, struct file *filp)
6132{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006133 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006134 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006135 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006136
6137 if (tracing_disabled)
6138 return -ENODEV;
6139
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006140 if (trace_array_get(tr) < 0)
6141 return -ENODEV;
6142
Steven Rostedt2cadf912008-12-01 22:20:19 -05006143 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006144 if (!info) {
6145 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006146 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006147 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006148
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006149 mutex_lock(&trace_types_lock);
6150
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006151 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006152 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05006153 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006154 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006155 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006156 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006157 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006158
6159 filp->private_data = info;
6160
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006161 tr->current_trace->ref++;
6162
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006163 mutex_unlock(&trace_types_lock);
6164
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006165 ret = nonseekable_open(inode, filp);
6166 if (ret < 0)
6167 trace_array_put(tr);
6168
6169 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006170}
6171
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006172static unsigned int
6173tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6174{
6175 struct ftrace_buffer_info *info = filp->private_data;
6176 struct trace_iterator *iter = &info->iter;
6177
6178 return trace_poll(iter, filp, poll_table);
6179}
6180
Steven Rostedt2cadf912008-12-01 22:20:19 -05006181static ssize_t
6182tracing_buffers_read(struct file *filp, char __user *ubuf,
6183 size_t count, loff_t *ppos)
6184{
6185 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006186 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006187 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006188 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006189
Steven Rostedt2dc5d122009-03-04 19:10:05 -05006190 if (!count)
6191 return 0;
6192
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006193#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006194 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6195 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006196#endif
6197
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006198 if (!info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006199 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6200 iter->cpu_file);
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006201 if (!info->spare)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006202 return -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006203
Steven Rostedt2cadf912008-12-01 22:20:19 -05006204 /* Do we have previous read data to read? */
6205 if (info->read < PAGE_SIZE)
6206 goto read;
6207
Steven Rostedtb6273442013-02-28 13:44:11 -05006208 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006209 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006210 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006211 &info->spare,
6212 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006213 iter->cpu_file, 0);
6214 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05006215
6216 if (ret < 0) {
6217 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006218 if ((filp->f_flags & O_NONBLOCK))
6219 return -EAGAIN;
6220
Rabin Vincente30f53a2014-11-10 19:46:34 +01006221 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006222 if (ret)
6223 return ret;
6224
Steven Rostedtb6273442013-02-28 13:44:11 -05006225 goto again;
6226 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006227 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05006228 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006229
Steven Rostedt436fc282011-10-14 10:44:25 -04006230 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05006231 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05006232 size = PAGE_SIZE - info->read;
6233 if (size > count)
6234 size = count;
6235
6236 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006237 if (ret == size)
6238 return -EFAULT;
6239
Steven Rostedt2dc5d122009-03-04 19:10:05 -05006240 size -= ret;
6241
Steven Rostedt2cadf912008-12-01 22:20:19 -05006242 *ppos += size;
6243 info->read += size;
6244
6245 return size;
6246}
6247
/*
 * Release handler for trace_pipe_raw: drops the current tracer's
 * reference (taken in tracing_buffers_open()), puts the trace array,
 * and frees the staged spare page and the per-open info.
 */
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	/* Serialize against tracer changes while dropping the ref. */
	mutex_lock(&trace_types_lock);

	iter->tr->current_trace->ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}
6267
/*
 * Reference-counted handle for a ring buffer page handed to the pipe
 * layer by tracing_buffers_splice_read().  The page is returned to the
 * ring buffer only when the last reference is dropped.
 */
struct buffer_ref {
	struct ring_buffer	*buffer;	/* ring buffer the page belongs to */
	void			*page;		/* page pulled from the ring buffer */
	int			ref;		/* reference count */
};
6273
6274static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6275 struct pipe_buffer *buf)
6276{
6277 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6278
6279 if (--ref->ref)
6280 return;
6281
6282 ring_buffer_free_read_page(ref->buffer, ref->page);
6283 kfree(ref);
6284 buf->private = 0;
6285}
6286
/*
 * Pipe buffer get callback: take an additional reference on the
 * buffer_ref backing this pipe buffer.
 *
 * NOTE(review): ref is a plain int with no upper bound check here;
 * confirm callers cannot drive it to overflow via repeated splices.
 */
static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}
6294
/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,	/* pages are whole ring buffer pages; never merge */
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};
6303
6304/*
6305 * Callback from splice_to_pipe(), if we need to release some pages
6306 * at the end of the spd in case we error'ed out in filling the pipe.
6307 */
6308static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6309{
6310 struct buffer_ref *ref =
6311 (struct buffer_ref *)spd->partial[i].private;
6312
6313 if (--ref->ref)
6314 return;
6315
6316 ring_buffer_free_read_page(ref->buffer, ref->page);
6317 kfree(ref);
6318 spd->partial[i].private = 0;
6319}
6320
/*
 * splice_read handler for trace_pipe_raw: moves whole ring buffer pages
 * into a pipe without copying.  Each page is wrapped in a refcounted
 * buffer_ref whose lifetime is managed by buffer_pipe_buf_ops /
 * buffer_spd_release.
 *
 * Both *ppos and len must be page aligned (len is rounded down if it is
 * at least one page).  Blocks via wait_on_pipe() unless O_NONBLOCK or
 * SPLICE_F_NONBLOCK is set.  Returns bytes spliced or a negative errno.
 */
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* The snapshot swap buffer cannot be read while in use. */
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	/* Fill pipe slots while there is room, requested length and data. */
	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			ret = -ENOMEM;
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * zero out any left over data, this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		/* An allocation failure above takes precedence over -EAGAIN. */
		if (ret)
			goto out;

		ret = -EAGAIN;
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			goto out;

		ret = wait_on_pipe(iter, true);
		if (ret)
			goto out;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);

	return ret;
}
6435
/* File operations for the per-cpu trace_pipe_raw files. */
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
6444
/*
 * Read handler for the per-cpu "stats" file: formats ring buffer
 * counters (entries, overruns, bytes, timestamps, dropped/read events)
 * for one CPU into a trace_seq and copies the text to user space.
 *
 * Returns bytes copied, or -ENOMEM if the trace_seq cannot be allocated.
 */
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock: print as sec.usec */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock: raw counts */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}
6508
/* File operations for the per-cpu "stats" files. */
static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
6515
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006516#ifdef CONFIG_DYNAMIC_FTRACE
6517
/*
 * Weak default for architectures that have no extra dynamic-ftrace
 * information to report; arch code may override it to append text to
 * @buf (up to @size bytes) and return the number of bytes written.
 */
int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}
6522
/*
 * Read handler for dyn_ftrace_total_info: prints the counter pointed to
 * by the file's private_data, followed by any arch-specific text.
 *
 * A single static buffer is shared by all readers; dyn_info_mutex
 * serializes access to it.
 */
static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	/* Leave room for the trailing newline appended below. */
	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}
6546
/* File operations for dyn_ftrace_total_info. */
static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006552#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006553
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006554#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
/* Probe callback for the unconditional "snapshot" function command. */
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006560
/*
 * Probe callback for the counted "snapshot:N" function command.  The
 * probe data field is used as the remaining-shot counter: 0 means the
 * budget is exhausted, -1 means snapshot without limit.
 */
static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (*count == 0)
		return;

	/* -1 is "unlimited"; otherwise consume one shot. */
	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}
6574
/*
 * Show one registered snapshot probe in set_ftrace_filter output as
 * "<func>:snapshot:count=N" or "<func>:snapshot:unlimited".
 */
static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long remaining = (long)data;

	seq_printf(m, "%ps:snapshot", (void *)ip);

	if (remaining == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", remaining);

	return 0;
}
6592
/* Probe ops for the unlimited "snapshot" command. */
static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};
6597
/* Probe ops for the counted "snapshot:N" command. */
static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};
6602
/*
 * Parser for the "snapshot[:count]" command written to
 * set_ftrace_filter.  @glob selects the functions ('!' prefix removes
 * an existing probe), @param optionally carries the shot count.
 *
 * Returns 0 on success or a negative errno.
 */
static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;	/* default: unlimited snapshots */
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	/* Make sure the snapshot buffer exists before arming the probe. */
	ret = alloc_snapshot(&global_trace);
	if (ret < 0)
		goto out;

	ret = register_ftrace_function_probe(glob, ops, count);

 out:
	return ret < 0 ? ret : 0;
}
6649
/* The "snapshot" command usable in set_ftrace_filter. */
static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};
6654
/* Register the "snapshot" function command at boot. */
static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
/* Stub when snapshot support or dynamic ftrace is not configured. */
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006662
/*
 * Return the tracefs parent dentry for @tr: NULL for the top-level
 * (global) trace array, the instance directory otherwise, or an
 * ERR_PTR if the directory was never created.
 */
static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
	if (WARN_ON(!tr->dir))
		return ERR_PTR(-ENODEV);

	/* Top directory uses NULL as the parent */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return NULL;

	/* All sub buffers have a descriptor */
	return tr->dir;
}
6675
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006676static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6677{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006678 struct dentry *d_tracer;
6679
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006680 if (tr->percpu_dir)
6681 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006682
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006683 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05006684 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006685 return NULL;
6686
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006687 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006688
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006689 WARN_ONCE(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006690 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006691
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006692 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006693}
6694
Oleg Nesterov649e9c72013-07-23 17:25:54 +02006695static struct dentry *
6696trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6697 void *data, long cpu, const struct file_operations *fops)
6698{
6699 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6700
6701 if (ret) /* See tracing_get_cpu() */
David Howells7682c912015-03-17 22:26:16 +00006702 d_inode(ret)->i_cdev = (void *)(cpu + 1);
Oleg Nesterov649e9c72013-07-23 17:25:54 +02006703 return ret;
6704}
6705
/*
 * Populate per_cpu/cpu<N> in tracefs for @tr with the per-cpu control
 * files (trace_pipe, trace, trace_pipe_raw, stats, buffer_size_kb and,
 * when configured, the snapshot files).  Failures only warn; there is
 * no caller-visible error.
 */
static void
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}
6748
Steven Rostedt60a11772008-05-12 21:20:44 +02006749#ifdef CONFIG_FTRACE_SELFTEST
6750/* Let selftest have access to static functions in this file */
6751#include "trace_selftest.c"
6752#endif
6753
Steven Rostedt577b7852009-02-26 23:43:05 -05006754static ssize_t
6755trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6756 loff_t *ppos)
6757{
6758 struct trace_option_dentry *topt = filp->private_data;
6759 char *buf;
6760
6761 if (topt->flags->val & topt->opt->bit)
6762 buf = "1\n";
6763 else
6764 buf = "0\n";
6765
6766 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6767}
6768
/*
 * Write handler for a tracer-specific option file: accepts "0" or "1"
 * and, if the value differs from the current flag state, updates the
 * option under trace_types_lock.
 *
 * Returns the number of bytes consumed or a negative errno.
 */
static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/* Only touch the tracer when the state actually changes. */
	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		/* NOTE(review): last arg !val appears to be a "negate" flag — verify. */
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}
6797
6798
/* File operations for tracer-specific option files under options/. */
static const struct file_operations trace_options_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_read,
	.write		= trace_options_write,
	.llseek		= generic_file_llseek,
};
6805
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006806/*
6807 * In order to pass in both the trace_array descriptor as well as the index
6808 * to the flag that the trace option file represents, the trace_array
6809 * has a character array of trace_flags_index[], which holds the index
6810 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6811 * The address of this character array is passed to the flag option file
6812 * read/write callbacks.
6813 *
6814 * In order to extract both the index and the trace_array descriptor,
6815 * get_tr_index() uses the following algorithm.
6816 *
6817 * idx = *ptr;
6818 *
6819 * As the pointer itself contains the address of the index (remember
6820 * index[1] == 1).
6821 *
6822 * Then to get the trace_array descriptor, by subtracting that index
6823 * from the ptr, we get to the start of the index itself.
6824 *
6825 * ptr - idx == &index[0]
6826 *
6827 * Then a simple container_of() from that pointer gets us to the
6828 * trace_array descriptor.
6829 */
6830static void get_tr_index(void *data, struct trace_array **ptr,
6831 unsigned int *pindex)
6832{
6833 *pindex = *(unsigned char *)data;
6834
6835 *ptr = container_of(data - *pindex, struct trace_array,
6836 trace_flags_index);
6837}
6838
Steven Rostedta8259072009-02-26 22:19:12 -05006839static ssize_t
6840trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6841 loff_t *ppos)
6842{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006843 void *tr_index = filp->private_data;
6844 struct trace_array *tr;
6845 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05006846 char *buf;
6847
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006848 get_tr_index(tr_index, &tr, &index);
6849
6850 if (tr->trace_flags & (1 << index))
Steven Rostedta8259072009-02-26 22:19:12 -05006851 buf = "1\n";
6852 else
6853 buf = "0\n";
6854
6855 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6856}
6857
/*
 * Write handler for the core trace option files: accepts "0" or "1"
 * and flips the corresponding trace_flags bit via set_tracer_flag()
 * under trace_types_lock.
 *
 * Returns bytes consumed or a negative errno.
 */
static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
6888
/* File operations for the core trace option files under options/. */
static const struct file_operations trace_options_core_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_core_read,
	.write		= trace_options_core_write,
	.llseek		= generic_file_llseek,
};
6895
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006896struct dentry *trace_create_file(const char *name,
Al Virof4ae40a2011-07-24 04:33:43 -04006897 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006898 struct dentry *parent,
6899 void *data,
6900 const struct file_operations *fops)
6901{
6902 struct dentry *ret;
6903
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006904 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006905 if (!ret)
Joe Perchesa395d6a2016-03-22 14:28:09 -07006906 pr_warn("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006907
6908 return ret;
6909}
6910
6911
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006912static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05006913{
6914 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05006915
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006916 if (tr->options)
6917 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05006918
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006919 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05006920 if (IS_ERR(d_tracer))
Steven Rostedta8259072009-02-26 22:19:12 -05006921 return NULL;
6922
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006923 tr->options = tracefs_create_dir("options", d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006924 if (!tr->options) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07006925 pr_warn("Could not create tracefs directory 'options'\n");
Steven Rostedta8259072009-02-26 22:19:12 -05006926 return NULL;
6927 }
6928
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006929 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05006930}
6931
Steven Rostedt577b7852009-02-26 23:43:05 -05006932static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006933create_trace_option_file(struct trace_array *tr,
6934 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006935 struct tracer_flags *flags,
6936 struct tracer_opt *opt)
6937{
6938 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05006939
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006940 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05006941 if (!t_options)
6942 return;
6943
6944 topt->flags = flags;
6945 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006946 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05006947
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006948 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006949 &trace_options_fops);
6950
Steven Rostedt577b7852009-02-26 23:43:05 -05006951}
6952
/*
 * Create the per-tracer option files for @tracer in trace array @tr and
 * record them in tr->topts so they can be freed when the instance is
 * removed.  Returns silently on allocation failure, on duplicate flags,
 * or when the tracer is not valid for this array.
 */
static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/* Make sure there's no duplicate flags. */
		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
			return;
	}

	opts = flags->opts;

	/* Count the options; the NULL-named sentinel terminates the array */
	for (cnt = 0; opts[cnt].name; cnt++)
		;

	/* One extra (zeroed) slot acts as the terminator of topts */
	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	/* Grow the per-array list of per-tracer option blocks by one */
	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		WARN_ONCE(topts[cnt].entry == NULL,
			  "Failed to create trace option: %s",
			  opts[cnt].name);
	}
}
7013
Steven Rostedta8259072009-02-26 22:19:12 -05007014static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007015create_trace_option_core_file(struct trace_array *tr,
7016 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05007017{
7018 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05007019
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007020 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05007021 if (!t_options)
7022 return NULL;
7023
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007024 return trace_create_file(option, 0644, t_options,
7025 (void *)&tr->trace_flags_index[index],
7026 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05007027}
7028
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007029static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05007030{
7031 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007032 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05007033 int i;
7034
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007035 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05007036 if (!t_options)
7037 return;
7038
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007039 for (i = 0; trace_options[i]; i++) {
7040 if (top_level ||
7041 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7042 create_trace_option_core_file(tr, trace_options[i], i);
7043 }
Steven Rostedta8259072009-02-26 22:19:12 -05007044}
7045
Steven Rostedt499e5472012-02-22 15:50:28 -05007046static ssize_t
7047rb_simple_read(struct file *filp, char __user *ubuf,
7048 size_t cnt, loff_t *ppos)
7049{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04007050 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05007051 char buf[64];
7052 int r;
7053
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04007054 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05007055 r = sprintf(buf, "%d\n", r);
7056
7057 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7058}
7059
/*
 * Write handler for "tracing_on".  Any nonzero value turns the ring
 * buffer on and calls the current tracer's ->start; zero turns it off
 * and calls ->stop.  trace_types_lock guards against the current
 * tracer changing mid-update.
 */
static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	/* Advance the file position; the whole write is consumed */
	(*ppos)++;

	return cnt;
}
7091
/* File operations for the per-array "tracing_on" switch */
static const struct file_operations rb_simple_fops = {
	.open = tracing_open_generic_tr,
	.read = rb_simple_read,
	.write = rb_simple_write,
	.release = tracing_release_generic_tr,
	.llseek = default_llseek,
};
7099
/* The tracefs "instances" directory; mkdir/rmdir in it manage instances */
struct dentry *trace_instance_dir;

/* Forward declaration: shared by instance creation and boot-time setup */
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
Steven Rostedt277ba042012-08-03 16:10:49 -04007104
/*
 * Allocate the ring buffer and per-cpu data for one trace_buffer of
 * @tr, sized at @size (kb units as used by trace_buf_size).
 * Returns 0 on success, -ENOMEM on failure (nothing left allocated).
 */
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	/* Honor the array's "overwrite" option when sizing behavior */
	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	/*
	 * NOTE(review): this intentionally (?) references tr->trace_buffer
	 * rather than @buf, so when called for the max/snapshot buffer it
	 * re-applies the entry count to the main buffer — confirm upstream.
	 */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}
7130
/*
 * Allocate the main trace buffer of @tr and, when the max tracer is
 * configured, the snapshot (max) buffer as well.  The snapshot buffer
 * is only fully sized when boot requested a snapshot; otherwise it is
 * allocated minimally (1) so it can be expanded on demand.
 */
static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		/* Unwind the main buffer allocated above */
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
7157
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04007158static void free_trace_buffer(struct trace_buffer *buf)
7159{
7160 if (buf->buffer) {
7161 ring_buffer_free(buf->buffer);
7162 buf->buffer = NULL;
7163 free_percpu(buf->data);
7164 buf->data = NULL;
7165 }
7166}
7167
/* Free both the main and (if configured) the max/snapshot buffer of @tr */
static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}
7179
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007180static void init_trace_flags_index(struct trace_array *tr)
7181{
7182 int i;
7183
7184 /* Used by the trace options files */
7185 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7186 tr->trace_flags_index[i] = i;
7187}
7188
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007189static void __update_tracer_options(struct trace_array *tr)
7190{
7191 struct tracer *t;
7192
7193 for (t = trace_types; t; t = t->next)
7194 add_tracer_options(tr, t);
7195}
7196
/* Locked wrapper around __update_tracer_options() */
static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}
7203
/*
 * tracefs callback for "mkdir instances/<name>": allocate and wire up
 * a new trace array instance.  Returns 0 on success, -EEXIST for a
 * duplicate name, -ENOMEM on allocation failure, or the error from
 * event_trace_add_tracer().
 */
static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	/* Reject a name that is already in use */
	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	/* Inherit the global flags, minus those that must start cleared */
	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	/* New instances start with no tracer active */
	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	/* Populate the instance directory and per-flag bookkeeping */
	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	/* free_trace_buffers()/free_cpumask_var() tolerate未-set members */
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;

}
7277
/*
 * tracefs callback for "rmdir instances/<name>": tear down an instance.
 * Returns -ENODEV if the name is unknown, -EBUSY while the instance or
 * its current tracer still has references, 0 on success.
 */
static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;
	int i;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	/* Refuse removal while open files or the tracer pin the array */
	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	/* Disable all the flags that were enabled coming in */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
		if ((1 << i) & ZEROED_TRACE_FLAGS)
			set_tracer_flag(tr, 1 << i, 0);
	}

	/* Stop tracing, then unwind events, pids, function files and dir */
	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_clear_pids(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	/* Release the per-tracer option bookkeeping */
	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
7332
Steven Rostedt277ba042012-08-03 16:10:49 -04007333static __init void create_trace_instances(struct dentry *d_tracer)
7334{
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05007335 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7336 instance_mkdir,
7337 instance_rmdir);
Steven Rostedt277ba042012-08-03 16:10:49 -04007338 if (WARN_ON(!trace_instance_dir))
7339 return;
Steven Rostedt277ba042012-08-03 16:10:49 -04007340}
7341
/*
 * Populate a trace array's tracefs directory (@d_tracer) with the
 * standard control and data files.  Used for both the top-level array
 * and every instance.
 */
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("saved_tgids", 0444, d_tracer,
			  tr, &tracing_saved_tgids_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	/* Per-cpu sub-directories (per_cpu/cpuN) */
	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}
7406
/*
 * Automount callback for debugfs/tracing: mounts tracefs there so old
 * tooling keeps working.  Returns the new mount or NULL on failure.
 */
static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	/*
	 * NOTE(review): extra reference taken so the mount persists —
	 * presumably balanced by the automount machinery; confirm against
	 * the follow_automount() contract.
	 */
	mntget(mnt);

	return mnt;
}
7428
/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 *
 * Returns NULL on success (the top level array uses a NULL parent for
 * tracefs files) or an ERR_PTR on failure.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!tracefs_initialized()) ||
		(IS_ENABLED(CONFIG_DEBUG_FS) &&
		 WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return NULL;
}
7464
/* Linker-provided bounds of the built-in trace enum map section */
extern struct trace_enum_map *__start_ftrace_enum_maps[];
extern struct trace_enum_map *__stop_ftrace_enum_maps[];

/* Register all built-in (non-module) trace enum maps */
static void __init trace_enum_init(void)
{
	int len;

	len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
	trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
}
7475
#ifdef CONFIG_MODULES
/* Register the trace enum maps carried by a just-loaded module */
static void trace_module_add_enums(struct module *mod)
{
	if (!mod->num_trace_enums)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
}
7491
#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/*
 * Remove @mod's block of enum maps from the trace_enum_maps list when
 * the module is unloaded.  Walks the list by whole per-module blocks:
 * each block's head names the owning module and its tail links to the
 * next block.
 */
static void trace_module_remove_enums(struct module *mod)
{
	union trace_enum_map_item *map;
	union trace_enum_map_item **last = &trace_enum_maps;

	if (!mod->num_trace_enums)
		return;

	mutex_lock(&trace_enum_mutex);

	map = trace_enum_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		/* Skip to this block's tail, remember its next-link slot */
		map = trace_enum_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	/* Splice the module's block out of the list and free it */
	*last = trace_enum_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_enum_mutex);
}
#else
static inline void trace_module_remove_enums(struct module *mod) { }
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
7523
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007524static int trace_module_notify(struct notifier_block *self,
7525 unsigned long val, void *data)
7526{
7527 struct module *mod = data;
7528
7529 switch (val) {
7530 case MODULE_STATE_COMING:
7531 trace_module_add_enums(mod);
7532 break;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007533 case MODULE_STATE_GOING:
7534 trace_module_remove_enums(mod);
7535 break;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007536 }
7537
7538 return 0;
7539}
7540
/* Hooked into the module load/unload notifier chain at boot */
static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007546
/*
 * Boot-time (fs_initcall) setup of the top-level tracefs hierarchy:
 * global trace files, enum maps, the instances directory, and the
 * per-tracer option files.  Always returns 0.
 */
static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	init_tracer_tracefs(&global_trace, d_tracer);
	ftrace_init_tracefs_toplevel(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_enum_init();

	trace_create_enum_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}
7591
/* On panic, dump the trace buffers if "ftrace_dump_on_oops" is set */
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}
7599
/* Registered on the panic notifier chain */
static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};
7605
7606static int trace_die_handler(struct notifier_block *self,
7607 unsigned long val,
7608 void *data)
7609{
7610 switch (val) {
7611 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04007612 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007613 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007614 break;
7615 default:
7616 break;
7617 }
7618 return NOTIFY_OK;
7619}
7620
/* Registered on the die notifier chain */
static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
7625
/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 * This caps how much of a trace_seq one trace_printk_seq() call emits.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007638
Jason Wessel955b61e2010-08-05 09:22:23 -05007639void
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007640trace_printk_seq(struct trace_seq *s)
7641{
7642 /* Probably should print a warning here. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04007643 if (s->seq.len >= TRACE_MAX_PRINT)
7644 s->seq.len = TRACE_MAX_PRINT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007645
Steven Rostedt (Red Hat)820b75f2014-11-19 10:56:41 -05007646 /*
7647 * More paranoid code. Although the buffer size is set to
7648 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7649 * an extra layer of protection.
7650 */
7651 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
7652 s->seq.len = s->seq.size - 1;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007653
7654 /* should be zero ended, but we are paranoid. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04007655 s->buffer[s->seq.len] = 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007656
7657 printk(KERN_TRACE "%s", s->buffer);
7658
Steven Rostedtf9520752009-03-02 14:04:40 -05007659 trace_seq_init(s);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007660}
7661
Jason Wessel955b61e2010-08-05 09:22:23 -05007662void trace_init_global_iter(struct trace_iterator *iter)
7663{
7664 iter->tr = &global_trace;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007665 iter->trace = iter->tr->current_trace;
Steven Rostedtae3b5092013-01-23 15:22:59 -05007666 iter->cpu_file = RING_BUFFER_ALL_CPUS;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007667 iter->trace_buffer = &global_trace.trace_buffer;
Cody P Schaferb2f974d2013-10-23 11:49:57 -07007668
7669 if (iter->trace && iter->trace->open)
7670 iter->trace->open(iter);
7671
7672 /* Annotate start of buffers if we had overruns */
7673 if (ring_buffer_overruns(iter->trace_buffer->buffer))
7674 iter->iter_flags |= TRACE_FILE_ANNOTATE;
7675
7676 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7677 if (trace_clocks[iter->tr->clock_id].in_ns)
7678 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
Jason Wessel955b61e2010-08-05 09:22:23 -05007679}
7680
/*
 * ftrace_dump - dump the trace buffers to the console
 * @oops_dump_mode: DUMP_ALL dumps every CPU's buffer, DUMP_ORIG dumps only
 *	the current CPU's buffer, DUMP_NONE does nothing.
 *
 * Called from the panic and die notifiers above (and exported for other
 * callers, e.g. sysrq-z) to print whatever is in the ring buffers before
 * the machine goes down.  Tracing is turned off as a side effect.
 */
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	/* Keep new events from being recorded while we read the buffers. */
	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("# MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		/* A dump can take a while; keep the NMI watchdog quiet. */
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	/* Restore the userobj flag and re-enable recording on every CPU. */
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
Paul E. McKenneya8eecf22011-10-02 11:01:15 -07007790EXPORT_SYMBOL_GPL(ftrace_dump);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01007791
/*
 * Allocate and wire up everything the tracing core needs at boot:
 * cpumasks, the trace_printk buffers, the saved-cmdlines table, the ring
 * buffers for the global trace array, and the panic/die notifiers.
 * On failure, already-acquired resources are unwound in reverse order via
 * the goto chain at the bottom.  Returns 0 on success, -ENOMEM on
 * allocation failure.
 */
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	/* A bad boot-time clock is non-fatal: warn and keep the default. */
	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	/* Arrange for the buffers to be dumped on panic or oops. */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	/* Make the global array visible on the instance list. */
	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
Steven Rostedtb2821ae2009-02-02 21:38:32 -05007896
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05007897void __init trace_init(void)
7898{
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05007899 if (tracepoint_printk) {
7900 tracepoint_print_iter =
7901 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7902 if (WARN_ON(!tracepoint_print_iter))
7903 tracepoint_printk = 0;
7904 }
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05007905 tracer_alloc_buffers();
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04007906 trace_event_init();
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05007907}
7908
Steven Rostedtb2821ae2009-02-02 21:38:32 -05007909__init static int clear_boot_tracer(void)
7910{
7911 /*
7912 * The default tracer at boot buffer is an init section.
7913 * This function is called in lateinit. If we did not
7914 * find the boot tracer, then clear it out, to prevent
7915 * later registration from accessing the buffer that is
7916 * about to be freed.
7917 */
7918 if (!default_bootup_tracer)
7919 return 0;
7920
7921 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7922 default_bootup_tracer);
7923 default_bootup_tracer = NULL;
7924
7925 return 0;
7926}
7927
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007928fs_initcall(tracer_init_tracefs);
Steven Rostedtb2821ae2009-02-02 21:38:32 -05007929late_initcall(clear_boot_tracer);