/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring-buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1, and is set back to zero only when the
 * tracer initializes successfully; that is the only place that
 * clears it.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

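/*
 * Illustrative usage (sketch): the knob above can be flipped at
 * runtime from a shell,
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 *
 * or set at boot with "ftrace_dump_on_oops" (all CPUs) or
 * "ftrace_dump_on_oops=orig_cpu" on the kernel command line; see
 * set_ftrace_dump_on_oops() below for the accepted forms.
 */
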
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_enum_map_item;

struct trace_enum_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "enum_string"
	 */
	union trace_enum_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
union trace_enum_map_item {
	struct trace_enum_map		map;
	struct trace_enum_map_head	head;
	struct trace_enum_map_tail	tail;
};
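
/*
 * Illustrative layout (sketch): an array holding N saved maps is
 * arranged as
 *
 *	[ head | map 0 | map 1 | ... | map N-1 | tail ]
 *
 * with head.length == N, and tail.next pointing to the next such
 * array of saved enum_map items.
 */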

static union trace_enum_map_item *trace_enum_maps;
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
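
/*
 * Illustrative boot-time usage (sketch): passing, e.g.,
 *
 *	ftrace=function
 *
 * on the kernel command line copies "function" into bootup_tracer_buf
 * and points default_bootup_tracer at it, so that tracer is enabled
 * as soon as tracing initializes.
 */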

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

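/*
 * For example: ns2usecs(1499) == 1 and ns2usecs(1500) == 2, i.e. the
 * "+ 500" rounds to the nearest microsecond rather than truncating.
 */
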
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	TRACE_ITER_EVENT_FORK

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

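/*
 * Illustrative pairing (sketch; not a caller that exists in this
 * file): code that needs to keep a trace_array alive across an
 * operation brackets it with the pair above,
 *
 *	if (trace_array_get(tr) < 0)
 *		return -ENODEV;
 *	(use tr)
 *	trace_array_put(tr);
 */
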
int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
	/*
	 * If filtered_pids does not exist, all pids are good to
	 * trace, so return false.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, and if @self is defined, the task is only added if
 * @self is also included in @pid_list. This happens on fork, and tasks
 * should only be added when the parent is listed. If @self is NULL,
 * then the @task pid will be removed from the list, which would happen
 * on exit of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}

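/*
 * Illustrative call sites (sketch; the actual callers live elsewhere
 * in the tracing code): the fork and exit hooks are expected to
 * forward into this helper roughly as
 *
 *	trace_filter_add_remove_task(pid_list, current, child);  (fork)
 *	trace_filter_add_remove_task(pid_list, NULL, task);      (exit)
 */
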
/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by the seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}

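/*
 * Illustrative wiring (sketch; "pid_seq_ops" and the wrappers are
 * hypothetical names): the three helpers above supply most of a
 * seq_file implementation,
 *
 *	static const struct seq_operations pid_seq_ops = {
 *		.start	= ...,	(wrapper that resolves the pid_list,
 *				 then calls trace_pid_start())
 *		.next	= ...,	(wrapper that calls trace_pid_next())
 *		.stop	= ...,
 *		.show	= trace_pid_show,
 *	};
 */
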
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list)
		return -ENOMEM;

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		parser.buffer[parser.idx] = 0;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}

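/*
 * Illustrative caller (sketch; names are hypothetical, and the real
 * write handlers live elsewhere in the tracing code): a tracefs
 * "set_*_pid" write op typically does
 *
 *	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
 *	if (ret < 0)
 *		return ret;
 *	rcu_assign_pointer(tr->filtered_pids, pid_list);
 *	(then frees the old list after an RCU grace period)
 */
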
static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to system.
 *
 * These primitives allow multi-process access to different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

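/*
 * Illustrative usage (sketch): a reader of one CPU's buffer brackets
 * its accesses with
 *
 *	trace_access_lock(cpu);
 *	(consume events from that cpu's ring buffer)
 *	trace_access_unlock(cpu);
 *
 * and passes RING_BUFFER_ALL_CPUS instead when it needs to touch every
 * CPU's buffer at once.
 */
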
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, flags, pc);
	ent->type = type;
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct ring_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

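/*
 * Illustrative usage (sketch): callers normally reach the two helpers
 * above through the trace_puts() macro, e.g.
 *
 *	trace_puts("reached the slow path\n");
 *
 * which dispatches to __trace_bputs() for compile-time constant
 * strings and to __trace_puts() otherwise.
 */
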
#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

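/*
 * Illustrative usage (sketch): once the snapshot buffer has been
 * allocated (e.g. via tracing_snapshot_alloc()), kernel code can
 * freeze the interesting moment without stopping the live trace:
 *
 *	if (unlikely(rare_bad_condition))
 *		tracing_snapshot();
 */
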
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) that
	 * we want to preserve.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

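/*
 * Illustrative boot-time usage (sketch): e.g.
 *
 *	trace_buf_size=1M
 *
 * on the kernel command line; memparse() accepts the usual K/M/G
 * suffixes, and the resulting size applies to each per-CPU buffer.
 */
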
static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

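/*
 * Illustrative expansion (sketch; the real flag list lives in
 * trace.h): if TRACE_FLAGS were
 *
 *	C(PRINT_PARENT, "print-parent"),	\
 *	C(SYM_OFFSET, "sym-offset"),
 *
 * then with "#define C(a, b) b" the array above becomes
 *
 *	{ "print-parent", "sym-offset", NULL };
 */
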
static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	ARCH_TRACE_CLOCKS
};

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

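/*
 * Illustrative parse loop (sketch): trace_pid_write() above follows
 * this pattern,
 *
 *	if (trace_parser_get_init(&parser, size))
 *		return -ENOMEM;
 *	while (cnt > 0) {
 *		ret = trace_get_user(&parser, ubuf, cnt, &pos);
 *		(consume parser.buffer, advance ubuf/cnt by ret)
 *	}
 *	trace_parser_put(&parser);
 */
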
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

1377/**
1378 * update_max_tr_single - only copy one trace over, and reset the rest
1379 * @tr - tracer
1380 * @tsk - task with the latency
1381 * @cpu - the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001382 *
1383 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001384 */
Ingo Molnare309b412008-05-12 21:20:51 +02001385void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001386update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1387{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001388 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001389
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001390 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001391 return;
1392
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001393 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001394 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001395 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001396 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001397 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001398 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001399
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001400 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001401
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001402 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001403
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001404 if (ret == -EBUSY) {
1405 /*
1406 * We failed to swap the buffer due to a commit taking
1407 * place on this CPU. We fail to record, but we reset
1408 * the max trace buffer (no one writes directly to it)
1409 * and flag that it failed.
1410 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001411 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001412 "Failed to swap buffers due to commit in progress\n");
1413 }
1414
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001415 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001416
1417 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001418 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001419}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001420#endif /* CONFIG_TRACER_MAX_TRACE */
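/*
 * Illustrative example, not part of this file: after a latency tracer
 * calls update_max_tr(), user space reads the result back through
 * tracefs.  A minimal reader, assuming the usual mount point of
 * /sys/kernel/tracing (older setups use /sys/kernel/debug/tracing):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char buf[64];
 *		FILE *f = fopen("/sys/kernel/tracing/tracing_max_latency", "r");
 *
 *		if (!f)
 *			return 1;
 *		if (fgets(buf, sizeof(buf), f))
 *			printf("worst latency seen: %s", buf); // usecs
 *		fclose(f);
 *		return 0;
 *	}
 */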
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001421
Rabin Vincente30f53a2014-11-10 19:46:34 +01001422static int wait_on_pipe(struct trace_iterator *iter, bool full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001423{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001424 /* Iterators are static, they should be filled or empty */
1425 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001426 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001427
Rabin Vincente30f53a2014-11-10 19:46:34 +01001428 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1429 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001430}
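/*
 * From user space, wait_on_pipe() is simply why a read() of trace_pipe
 * blocks until data shows up.  A minimal consumer (illustrative only;
 * the tracefs mount point is assumed):
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		ssize_t n;
 *		int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		// read() parks in wait_on_pipe() until events arrive
 *		while ((n = read(fd, buf, sizeof(buf))) > 0)
 *			fwrite(buf, 1, n, stdout);
 *		close(fd);
 *		return 0;
 *	}
 */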
1431
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001432#ifdef CONFIG_FTRACE_STARTUP_TEST
1433static int run_tracer_selftest(struct tracer *type)
1434{
1435 struct trace_array *tr = &global_trace;
1436 struct tracer *saved_tracer = tr->current_trace;
1437 int ret;
1438
1439 if (!type->selftest || tracing_selftest_disabled)
1440 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001441
1442 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001443 * Run a selftest on this tracer.
1444 * Here we reset the trace buffer, and set the current
1445 * tracer to be this tracer. The tracer can then run some
1446 * internal tracing to verify that everything is in order.
1447 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001448 */
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001449 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001450
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001451 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001452
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001453#ifdef CONFIG_TRACER_MAX_TRACE
1454 if (type->use_max_tr) {
1455 /* If we expanded the buffers, make sure the max is expanded too */
1456 if (ring_buffer_expanded)
1457 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1458 RING_BUFFER_ALL_CPUS);
1459 tr->allocated_snapshot = true;
1460 }
1461#endif
1462
1463 /* the test is responsible for initializing and enabling */
1464 pr_info("Testing tracer %s: ", type->name);
1465 ret = type->selftest(type, tr);
1466 /* the test is responsible for resetting too */
1467 tr->current_trace = saved_tracer;
1468 if (ret) {
1469 printk(KERN_CONT "FAILED!\n");
1470 /* Add the warning after printing 'FAILED' */
1471 WARN_ON(1);
1472 return -1;
1473 }
1474 /* Only reset on passing, to avoid touching corrupted buffers */
1475 tracing_reset_online_cpus(&tr->trace_buffer);
1476
1477#ifdef CONFIG_TRACER_MAX_TRACE
1478 if (type->use_max_tr) {
1479 tr->allocated_snapshot = false;
1480
1481 /* Shrink the max buffer again */
1482 if (ring_buffer_expanded)
1483 ring_buffer_resize(tr->max_buffer.buffer, 1,
1484 RING_BUFFER_ALL_CPUS);
1485 }
1486#endif
1487
1488 printk(KERN_CONT "PASSED\n");
1489 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001490}
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001491#else
1492static inline int run_tracer_selftest(struct tracer *type)
1493{
1494 return 0;
1495}
1496#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001497
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001498static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1499
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001500static void __init apply_trace_boot_options(void);
1501
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001502/**
1503 * register_tracer - register a tracer with the ftrace system.
1504 * @type: the plugin for the tracer
1505 *
1506 * Register a new plugin tracer.
1507 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001508int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001509{
1510 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001511 int ret = 0;
1512
1513 if (!type->name) {
1514 pr_info("Tracer must have a name\n");
1515 return -1;
1516 }
1517
Dan Carpenter24a461d2010-07-10 12:06:44 +02001518 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08001519 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1520 return -1;
1521 }
1522
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001523 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01001524
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001525 tracing_selftest_running = true;
1526
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001527 for (t = trace_types; t; t = t->next) {
1528 if (strcmp(type->name, t->name) == 0) {
1529 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08001530 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001531 type->name);
1532 ret = -1;
1533 goto out;
1534 }
1535 }
1536
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001537 if (!type->set_flag)
1538 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08001539 if (!type->flags) {
1540		/* allocate a dummy tracer_flags */
1541 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08001542 if (!type->flags) {
1543 ret = -ENOMEM;
1544 goto out;
1545 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08001546 type->flags->val = 0;
1547 type->flags->opts = dummy_tracer_opt;
1548 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001549 if (!type->flags->opts)
1550 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01001551
Chunyu Hud39cdd22016-03-08 21:37:01 +08001552 /* store the tracer for __set_tracer_option */
1553 type->flags->trace = type;
1554
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001555 ret = run_tracer_selftest(type);
1556 if (ret < 0)
1557 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02001558
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001559 type->next = trace_types;
1560 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001561 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02001562
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001563 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001564 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001565 mutex_unlock(&trace_types_lock);
1566
Steven Rostedtdac74942009-02-05 01:13:38 -05001567 if (ret || !default_bootup_tracer)
1568 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05001569
Li Zefanee6c2c12009-09-18 14:06:47 +08001570 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05001571 goto out_unlock;
1572
1573 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1574 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05001575 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05001576 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001577
1578 apply_trace_boot_options();
1579
Steven Rostedtdac74942009-02-05 01:13:38 -05001580 /* disable other selftests, since this will break it. */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05001581 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05001582#ifdef CONFIG_FTRACE_STARTUP_TEST
1583 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1584 type->name);
1585#endif
1586
1587 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001588 return ret;
1589}
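/*
 * Sketch of a caller, not part of this file: a minimal built-in tracer
 * plugin.  Since register_tracer() is __init in this version, only code
 * linked into the kernel can use it; the .init/.reset hooks shown are
 * real struct tracer members, the rest is assumed boilerplate:
 *
 *	static int mini_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;	// arm whatever probes this tracer needs
 *	}
 *
 *	static void mini_tracer_reset(struct trace_array *tr)
 *	{
 *		// disarm what mini_tracer_init() set up
 *	}
 *
 *	static struct tracer mini_tracer __read_mostly = {
 *		.name	= "mini",
 *		.init	= mini_tracer_init,
 *		.reset	= mini_tracer_reset,
 *	};
 *
 *	static __init int mini_tracer_register(void)
 *	{
 *		return register_tracer(&mini_tracer);
 *	}
 *	core_initcall(mini_tracer_register);
 */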
1590
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001591void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001592{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001593 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001594
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001595 if (!buffer)
1596 return;
1597
Steven Rostedtf6339032009-09-04 12:35:16 -04001598 ring_buffer_record_disable(buffer);
1599
1600 /* Make sure all commits have finished */
1601 synchronize_sched();
Steven Rostedt68179682012-05-08 20:57:53 -04001602 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001603
1604 ring_buffer_record_enable(buffer);
1605}
1606
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001607void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001608{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001609 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001610 int cpu;
1611
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001612 if (!buffer)
1613 return;
1614
Steven Rostedt621968c2009-09-04 12:02:35 -04001615 ring_buffer_record_disable(buffer);
1616
1617 /* Make sure all commits have finished */
1618 synchronize_sched();
1619
Alexander Z Lam94571582013-08-02 18:36:16 -07001620 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001621
1622 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001623 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001624
1625 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001626}
1627
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001628/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001629void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001630{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001631 struct trace_array *tr;
1632
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001633 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001634 tracing_reset_online_cpus(&tr->trace_buffer);
1635#ifdef CONFIG_TRACER_MAX_TRACE
1636 tracing_reset_online_cpus(&tr->max_buffer);
1637#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001638 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001639}
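/*
 * Illustrative example: user space reaches tracing_reset_online_cpus()
 * by truncating the "trace" file.  Assuming the usual tracefs mount:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		// O_TRUNC on "trace" clears the ring buffer contents
 *		int fd = open("/sys/kernel/tracing/trace",
 *			      O_WRONLY | O_TRUNC);
 *
 *		if (fd < 0)
 *			return 1;
 *		close(fd);
 *		return 0;
 *	}
 */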
1640
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001641#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001642#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001643static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001644struct saved_cmdlines_buffer {
1645 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1646 unsigned *map_cmdline_to_pid;
1647 unsigned cmdline_num;
1648 int cmdline_idx;
1649 char *saved_cmdlines;
1650};
1651static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001652
Steven Rostedt25b0b442008-05-12 21:21:00 +02001653/* temporary disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001654static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001655
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001656static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001657{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001658 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1659}
1660
1661static inline void set_cmdline(int idx, const char *cmdline)
1662{
1663 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1664}
1665
1666static int allocate_cmdlines_buffer(unsigned int val,
1667 struct saved_cmdlines_buffer *s)
1668{
1669 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1670 GFP_KERNEL);
1671 if (!s->map_cmdline_to_pid)
1672 return -ENOMEM;
1673
1674 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1675 if (!s->saved_cmdlines) {
1676 kfree(s->map_cmdline_to_pid);
1677 return -ENOMEM;
1678 }
1679
1680 s->cmdline_idx = 0;
1681 s->cmdline_num = val;
1682 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1683 sizeof(s->map_pid_to_cmdline));
1684 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1685 val * sizeof(*s->map_cmdline_to_pid));
1686
1687 return 0;
1688}
1689
1690static int trace_create_savedcmd(void)
1691{
1692 int ret;
1693
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001694 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001695 if (!savedcmd)
1696 return -ENOMEM;
1697
1698 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1699 if (ret < 0) {
1700 kfree(savedcmd);
1701 savedcmd = NULL;
1702 return -ENOMEM;
1703 }
1704
1705 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001706}
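/*
 * Illustrative example: the savedcmd table above feeds the tracefs
 * "saved_cmdlines" file, one "<pid> <comm>" pair per line (and
 * "saved_cmdlines_size" resizes it).  A minimal dump, assuming the
 * usual mount point:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/sys/kernel/tracing/saved_cmdlines", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 */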
1707
Carsten Emdeb5130b12009-09-13 01:43:07 +02001708int is_tracing_stopped(void)
1709{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001710 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001711}
1712
Steven Rostedt0f048702008-11-05 16:05:44 -05001713/**
1714 * tracing_start - quick start of the tracer
1715 *
1716 * If tracing is enabled but was stopped by tracing_stop,
1717 * this will start the tracer back up.
1718 */
1719void tracing_start(void)
1720{
1721 struct ring_buffer *buffer;
1722 unsigned long flags;
1723
1724 if (tracing_disabled)
1725 return;
1726
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001727 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1728 if (--global_trace.stop_count) {
1729 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001730 /* Someone screwed up their debugging */
1731 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001732 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001733 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001734 goto out;
1735 }
1736
Steven Rostedta2f80712010-03-12 19:56:00 -05001737 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001738 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001739
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001740 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001741 if (buffer)
1742 ring_buffer_record_enable(buffer);
1743
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001744#ifdef CONFIG_TRACER_MAX_TRACE
1745 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001746 if (buffer)
1747 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001748#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001749
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001750 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001751
Steven Rostedt0f048702008-11-05 16:05:44 -05001752 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001753 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1754}
1755
1756static void tracing_start_tr(struct trace_array *tr)
1757{
1758 struct ring_buffer *buffer;
1759 unsigned long flags;
1760
1761 if (tracing_disabled)
1762 return;
1763
1764 /* If global, we need to also start the max tracer */
1765 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1766 return tracing_start();
1767
1768 raw_spin_lock_irqsave(&tr->start_lock, flags);
1769
1770 if (--tr->stop_count) {
1771 if (tr->stop_count < 0) {
1772 /* Someone screwed up their debugging */
1773 WARN_ON_ONCE(1);
1774 tr->stop_count = 0;
1775 }
1776 goto out;
1777 }
1778
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001779 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001780 if (buffer)
1781 ring_buffer_record_enable(buffer);
1782
1783 out:
1784 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001785}
1786
1787/**
1788 * tracing_stop - quick stop of the tracer
1789 *
1790 * Light weight way to stop tracing. Use in conjunction with
1791 * tracing_start.
1792 */
1793void tracing_stop(void)
1794{
1795 struct ring_buffer *buffer;
1796 unsigned long flags;
1797
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001798 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1799 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001800 goto out;
1801
Steven Rostedta2f80712010-03-12 19:56:00 -05001802 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001803 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001804
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001805 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001806 if (buffer)
1807 ring_buffer_record_disable(buffer);
1808
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001809#ifdef CONFIG_TRACER_MAX_TRACE
1810 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001811 if (buffer)
1812 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001813#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001814
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001815 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001816
Steven Rostedt0f048702008-11-05 16:05:44 -05001817 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001818 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1819}
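/*
 * Sketch of the intended pairing: bracket a section where the buffers
 * must not advance, for example while copying them out from a debug
 * hook.  The stop_count handling above makes the calls nest safely:
 *
 *	tracing_stop();
 *	// walk or dump the ring buffers with no new writes racing in
 *	tracing_start();
 */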
1820
1821static void tracing_stop_tr(struct trace_array *tr)
1822{
1823 struct ring_buffer *buffer;
1824 unsigned long flags;
1825
1826 /* If global, we need to also stop the max tracer */
1827 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1828 return tracing_stop();
1829
1830 raw_spin_lock_irqsave(&tr->start_lock, flags);
1831 if (tr->stop_count++)
1832 goto out;
1833
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001834 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001835 if (buffer)
1836 ring_buffer_record_disable(buffer);
1837
1838 out:
1839 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001840}
1841
Ingo Molnare309b412008-05-12 21:20:51 +02001842void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001843
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001844static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001845{
Carsten Emdea635cf02009-03-18 09:00:41 +01001846 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001847
1848 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001849 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001850
1851 /*
1852 * It's not the end of the world if we don't get
1853 * the lock, but we also don't want to spin
1854 * nor do we want to disable interrupts,
1855 * so if we miss here, then better luck next time.
1856 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001857 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001858 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001859
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001860 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001861 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001862 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001863
Carsten Emdea635cf02009-03-18 09:00:41 +01001864 /*
1865 * Check whether the cmdline buffer at idx has a pid
1866 * mapped. We are going to overwrite that entry so we
1867 * need to clear the map_pid_to_cmdline. Otherwise we
1868 * would read the new comm for the old pid.
1869 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001870 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001871 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001872 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001873
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001874 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1875 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001876
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001877 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001878 }
1879
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001880 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001881
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001882 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001883
1884 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001885}
1886
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001887static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001888{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001889 unsigned map;
1890
Steven Rostedt4ca530852009-03-16 19:20:15 -04001891 if (!pid) {
1892 strcpy(comm, "<idle>");
1893 return;
1894 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001895
Steven Rostedt74bf4072010-01-25 15:11:53 -05001896 if (WARN_ON_ONCE(pid < 0)) {
1897 strcpy(comm, "<XXX>");
1898 return;
1899 }
1900
Steven Rostedt4ca530852009-03-16 19:20:15 -04001901 if (pid > PID_MAX_DEFAULT) {
1902 strcpy(comm, "<...>");
1903 return;
1904 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001905
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001906 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001907 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001908 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001909 else
1910 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001911}
1912
1913void trace_find_cmdline(int pid, char comm[])
1914{
1915 preempt_disable();
1916 arch_spin_lock(&trace_cmdline_lock);
1917
1918 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001919
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001920 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001921 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001922}
1923
Ingo Molnare309b412008-05-12 21:20:51 +02001924void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001925{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001926 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001927 return;
1928
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001929 if (!__this_cpu_read(trace_cmdline_save))
1930 return;
1931
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001932 if (trace_save_cmdline(tsk))
1933 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001934}
1935
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001936void
Steven Rostedt38697052008-10-01 13:14:09 -04001937tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1938 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001939{
1940 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001941
Steven Rostedt777e2082008-09-29 23:02:42 -04001942 entry->preempt_count = pc & 0xff;
1943 entry->pid = (tsk) ? tsk->pid : 0;
1944 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001945#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001946 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001947#else
1948 TRACE_FLAG_IRQS_NOSUPPORT |
1949#endif
Peter Zijlstra7e6867b2016-03-18 16:28:04 +01001950		((pc & NMI_MASK) ? TRACE_FLAG_NMI : 0) |
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001951 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1952 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001953 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1954 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001955}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001956EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
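/*
 * Sketch of the usual caller pattern: capture the interrupt state at
 * the instrumentation site and stamp the entry header.  This assumes
 * "entry" is an event record whose first member is a struct
 * trace_entry named "ent", which is the convention for trace events:
 *
 *	unsigned long irq_flags;
 *
 *	local_save_flags(irq_flags);
 *	tracing_generic_entry_update(&entry->ent, irq_flags,
 *				     preempt_count());
 */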
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001957
Steven Rostedte77405a2009-09-02 14:17:06 -04001958struct ring_buffer_event *
1959trace_buffer_lock_reserve(struct ring_buffer *buffer,
1960 int type,
1961 unsigned long len,
1962 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001963{
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05001964 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001965}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001966
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04001967DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1968DEFINE_PER_CPU(int, trace_buffered_event_cnt);
1969static int trace_buffered_event_ref;
1970
1971/**
1972 * trace_buffered_event_enable - enable buffering events
1973 *
1974 * When events are being filtered, it is quicker to use a temporary
1975 * buffer to write the event data into if there's a likely chance
1976 * that it will not be committed. The discard of the ring buffer
1977 * is not as fast as committing, and is much slower than copying
1978 * a commit.
1979 *
1980 * When an event is to be filtered, allocate per cpu buffers to
1981 * write the event data into, and if the event is filtered and discarded
1982 * it is simply dropped, otherwise, the entire data is to be committed
1983 * in one shot.
1984 */
1985void trace_buffered_event_enable(void)
1986{
1987 struct ring_buffer_event *event;
1988 struct page *page;
1989 int cpu;
1990
1991 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
1992
1993 if (trace_buffered_event_ref++)
1994 return;
1995
1996 for_each_tracing_cpu(cpu) {
1997 page = alloc_pages_node(cpu_to_node(cpu),
1998 GFP_KERNEL | __GFP_NORETRY, 0);
1999 if (!page)
2000 goto failed;
2001
2002 event = page_address(page);
2003 memset(event, 0, sizeof(*event));
2004
2005 per_cpu(trace_buffered_event, cpu) = event;
2006
2007 preempt_disable();
2008 if (cpu == smp_processor_id() &&
2009 this_cpu_read(trace_buffered_event) !=
2010 per_cpu(trace_buffered_event, cpu))
2011 WARN_ON_ONCE(1);
2012 preempt_enable();
2013 }
2014
2015 return;
2016 failed:
2017 trace_buffered_event_disable();
2018}
2019
2020static void enable_trace_buffered_event(void *data)
2021{
2022 /* Probably not needed, but do it anyway */
2023 smp_rmb();
2024 this_cpu_dec(trace_buffered_event_cnt);
2025}
2026
2027static void disable_trace_buffered_event(void *data)
2028{
2029 this_cpu_inc(trace_buffered_event_cnt);
2030}
2031
2032/**
2033 * trace_buffered_event_disable - disable buffering events
2034 *
2035 * When a filter is removed, it is faster to not use the buffered
2036 * events, and to commit directly into the ring buffer. Free up
2037 * the temp buffers when there are no more users. This requires
2038 * special synchronization with current events.
2039 */
2040void trace_buffered_event_disable(void)
2041{
2042 int cpu;
2043
2044 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2045
2046 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2047 return;
2048
2049 if (--trace_buffered_event_ref)
2050 return;
2051
2052 preempt_disable();
2053 /* For each CPU, set the buffer as used. */
2054 smp_call_function_many(tracing_buffer_mask,
2055 disable_trace_buffered_event, NULL, 1);
2056 preempt_enable();
2057
2058 /* Wait for all current users to finish */
2059 synchronize_sched();
2060
2061 for_each_tracing_cpu(cpu) {
2062 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2063 per_cpu(trace_buffered_event, cpu) = NULL;
2064 }
2065 /*
2066 * Make sure trace_buffered_event is NULL before clearing
2067 * trace_buffered_event_cnt.
2068 */
2069 smp_wmb();
2070
2071 preempt_disable();
2072 /* Do the work on each cpu */
2073 smp_call_function_many(tracing_buffer_mask,
2074 enable_trace_buffered_event, NULL, 1);
2075 preempt_enable();
2076}
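/*
 * Sketch of the expected call pattern from the event filter code; both
 * functions above insist on event_mutex being held, and the reference
 * count lets enable/disable pairs from different filters overlap:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();		// first filter attached
 *	...
 *	trace_buffered_event_disable();		// last filter removed
 *	mutex_unlock(&event_mutex);
 */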
2077
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002078static struct ring_buffer *temp_buffer;
2079
Steven Rostedtef5580d2009-02-27 19:38:04 -05002080struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04002081trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002082 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002083 int type, unsigned long len,
2084 unsigned long flags, int pc)
2085{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002086 struct ring_buffer_event *entry;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002087 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002088
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002089 *current_rb = trace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002090
2091 if ((trace_file->flags &
2092 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2093 (entry = this_cpu_read(trace_buffered_event))) {
2094 /* Try to use the per cpu buffer first */
2095 val = this_cpu_inc_return(trace_buffered_event_cnt);
2096 if (val == 1) {
2097 trace_event_setup(entry, type, flags, pc);
2098 entry->array[0] = len;
2099 return entry;
2100 }
2101 this_cpu_dec(trace_buffered_event_cnt);
2102 }
2103
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002104 entry = __trace_buffer_lock_reserve(*current_rb,
2105 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002106 /*
2107 * If tracing is off, but we have triggers enabled
2108 * we still need to look at the event data. Use the temp_buffer
2109	 * to store the trace event for the trigger to use. It's recursion
2110 * safe and will not be recorded anywhere.
2111 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002112 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002113 *current_rb = temp_buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002114 entry = __trace_buffer_lock_reserve(*current_rb,
2115 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002116 }
2117 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002118}
2119EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2120
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002121static DEFINE_SPINLOCK(tracepoint_iter_lock);
2122static DEFINE_MUTEX(tracepoint_printk_mutex);
2123
2124static void output_printk(struct trace_event_buffer *fbuffer)
2125{
2126 struct trace_event_call *event_call;
2127 struct trace_event *event;
2128 unsigned long flags;
2129 struct trace_iterator *iter = tracepoint_print_iter;
2130
2131 /* We should never get here if iter is NULL */
2132 if (WARN_ON_ONCE(!iter))
2133 return;
2134
2135 event_call = fbuffer->trace_file->event_call;
2136 if (!event_call || !event_call->event.funcs ||
2137 !event_call->event.funcs->trace)
2138 return;
2139
2140 event = &fbuffer->trace_file->event_call->event;
2141
2142 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2143 trace_seq_init(&iter->seq);
2144 iter->ent = fbuffer->entry;
2145 event_call->event.funcs->trace(iter, 0, event);
2146 trace_seq_putc(&iter->seq, 0);
2147 printk("%s", iter->seq.buffer);
2148
2149 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2150}
2151
2152int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2153 void __user *buffer, size_t *lenp,
2154 loff_t *ppos)
2155{
2156 int save_tracepoint_printk;
2157 int ret;
2158
2159 mutex_lock(&tracepoint_printk_mutex);
2160 save_tracepoint_printk = tracepoint_printk;
2161
2162 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2163
2164 /*
2165 * This will force exiting early, as tracepoint_printk
2166	 * is always zero when tracepoint_print_iter is not allocated
2167 */
2168 if (!tracepoint_print_iter)
2169 tracepoint_printk = 0;
2170
2171 if (save_tracepoint_printk == tracepoint_printk)
2172 goto out;
2173
2174 if (tracepoint_printk)
2175 static_key_enable(&tracepoint_printk_key.key);
2176 else
2177 static_key_disable(&tracepoint_printk_key.key);
2178
2179 out:
2180 mutex_unlock(&tracepoint_printk_mutex);
2181
2182 return ret;
2183}
2184
2185void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2186{
2187 if (static_key_false(&tracepoint_printk_key.key))
2188 output_printk(fbuffer);
2189
2190 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2191 fbuffer->event, fbuffer->entry,
2192 fbuffer->flags, fbuffer->pc);
2193}
2194EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
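/*
 * Illustrative example: the handler above backs the
 * kernel.tracepoint_printk sysctl.  Flipping it from user space only
 * has an effect when the kernel booted with tp_printk, since that is
 * what allocates tracepoint_print_iter:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/proc/sys/kernel/tracepoint_printk", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		// "1" echoes tracepoints to printk, "0" turns that off
 *		if (write(fd, "1", 1) != 1)
 *			return 1;
 *		close(fd);
 *		return 0;
 *	}
 */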
2195
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002196void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2197 struct ring_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002198 struct ring_buffer_event *event,
2199 unsigned long flags, int pc,
2200 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002201{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002202 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002203
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002204 /*
2205 * If regs is not set, then skip the following callers:
2206 * trace_buffer_unlock_commit_regs
2207 * event_trigger_unlock_commit
2208 * trace_event_buffer_commit
2209 * trace_event_raw_event_sched_switch
2210 * Note, we can still get here via blktrace, wakeup tracer
2211 * and mmiotrace, but that's ok if they lose a function or
2212	 * two. They are not that meaningful.
2213 */
2214 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002215 ftrace_trace_userstack(buffer, flags, pc);
2216}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002217
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002218/*
2219 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2220 */
2221void
2222trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2223 struct ring_buffer_event *event)
2224{
2225 __buffer_unlock_commit(buffer, event);
2226}
2227
Chunyan Zhang478409d2016-11-21 15:57:18 +08002228static void
2229trace_process_export(struct trace_export *export,
2230 struct ring_buffer_event *event)
2231{
2232 struct trace_entry *entry;
2233 unsigned int size = 0;
2234
2235 entry = ring_buffer_event_data(event);
2236 size = ring_buffer_event_length(event);
2237 export->write(entry, size);
2238}
2239
2240static DEFINE_MUTEX(ftrace_export_lock);
2241
2242static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2243
2244static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2245
2246static inline void ftrace_exports_enable(void)
2247{
2248 static_branch_enable(&ftrace_exports_enabled);
2249}
2250
2251static inline void ftrace_exports_disable(void)
2252{
2253 static_branch_disable(&ftrace_exports_enabled);
2254}
2255
2256void ftrace_exports(struct ring_buffer_event *event)
2257{
2258 struct trace_export *export;
2259
2260 preempt_disable_notrace();
2261
2262 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2263 while (export) {
2264 trace_process_export(export, event);
2265 export = rcu_dereference_raw_notrace(export->next);
2266 }
2267
2268 preempt_enable_notrace();
2269}
2270
2271static inline void
2272add_trace_export(struct trace_export **list, struct trace_export *export)
2273{
2274 rcu_assign_pointer(export->next, *list);
2275 /*
2276 * We are entering export into the list but another
2277 * CPU might be walking that list. We need to make sure
2278 * the export->next pointer is valid before another CPU sees
2279 * the export pointer included into the list.
2280 */
2281 rcu_assign_pointer(*list, export);
2282}
2283
2284static inline int
2285rm_trace_export(struct trace_export **list, struct trace_export *export)
2286{
2287 struct trace_export **p;
2288
2289 for (p = list; *p != NULL; p = &(*p)->next)
2290 if (*p == export)
2291 break;
2292
2293 if (*p != export)
2294 return -1;
2295
2296 rcu_assign_pointer(*p, (*p)->next);
2297
2298 return 0;
2299}
2300
2301static inline void
2302add_ftrace_export(struct trace_export **list, struct trace_export *export)
2303{
2304 if (*list == NULL)
2305 ftrace_exports_enable();
2306
2307 add_trace_export(list, export);
2308}
2309
2310static inline int
2311rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2312{
2313 int ret;
2314
2315 ret = rm_trace_export(list, export);
2316 if (*list == NULL)
2317 ftrace_exports_disable();
2318
2319 return ret;
2320}
2321
2322int register_ftrace_export(struct trace_export *export)
2323{
2324 if (WARN_ON_ONCE(!export->write))
2325 return -1;
2326
2327 mutex_lock(&ftrace_export_lock);
2328
2329 add_ftrace_export(&ftrace_exports_list, export);
2330
2331 mutex_unlock(&ftrace_export_lock);
2332
2333 return 0;
2334}
2335EXPORT_SYMBOL_GPL(register_ftrace_export);
2336
2337int unregister_ftrace_export(struct trace_export *export)
2338{
2339 int ret;
2340
2341 mutex_lock(&ftrace_export_lock);
2342
2343 ret = rm_ftrace_export(&ftrace_exports_list, export);
2344
2345 mutex_unlock(&ftrace_export_lock);
2346
2347 return ret;
2348}
2349EXPORT_SYMBOL_GPL(unregister_ftrace_export);
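/*
 * Sketch of a consumer, not part of this file: a module that taps the
 * function trace stream through the export hooks above.  The .write
 * signature is assumed from the call in trace_process_export():
 *
 *	static void my_export_write(const void *entry, unsigned int size)
 *	{
 *		// forward the raw trace_entry bytes, e.g. to an STM device
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *	};
 *
 *	static int __init my_export_init(void)
 *	{
 *		return register_ftrace_export(&my_export);
 *	}
 *
 *	static void __exit my_export_exit(void)
 *	{
 *		unregister_ftrace_export(&my_export);
 *	}
 *
 *	module_init(my_export_init);
 *	module_exit(my_export_exit);
 */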
2350
Ingo Molnare309b412008-05-12 21:20:51 +02002351void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002352trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04002353 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2354 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002355{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002356 struct trace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002357 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002358 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002359 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002360
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002361 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2362 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002363 if (!event)
2364 return;
2365 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002366 entry->ip = ip;
2367 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002368
Chunyan Zhang478409d2016-11-21 15:57:18 +08002369 if (!call_filter_check_discard(call, entry, buffer, event)) {
2370 if (static_branch_unlikely(&ftrace_exports_enabled))
2371 ftrace_exports(event);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002372 __buffer_unlock_commit(buffer, event);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002373 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002374}
2375
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002376#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002377
2378#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2379struct ftrace_stack {
2380 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2381};
2382
2383static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2384static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2385
Steven Rostedte77405a2009-09-02 14:17:06 -04002386static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05002387 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002388 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02002389{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002390 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002391 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002392 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02002393 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002394 int use_stack;
2395 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02002396
2397 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02002398 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02002399
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002400 /*
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002401	 * Add two, for this function and the call to save_stack_trace().
2402 * If regs is set, then these functions will not be in the way.
2403 */
2404 if (!regs)
2405 trace.skip += 2;
2406
2407 /*
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002408 * Since events can happen in NMIs there's no safe way to
2409 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2410 * or NMI comes in, it will just have to use the default
2411 * FTRACE_STACK_SIZE.
2412 */
2413 preempt_disable_notrace();
2414
Shan Wei82146522012-11-19 13:21:01 +08002415 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002416 /*
2417 * We don't need any atomic variables, just a barrier.
2418 * If an interrupt comes in, we don't care, because it would
2419 * have exited and put the counter back to what we want.
2420 * We just need a barrier to keep gcc from moving things
2421 * around.
2422 */
2423 barrier();
2424 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05002425 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002426 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2427
2428 if (regs)
2429 save_stack_trace_regs(regs, &trace);
2430 else
2431 save_stack_trace(&trace);
2432
2433 if (trace.nr_entries > size)
2434 size = trace.nr_entries;
2435 } else
2436 /* From now on, use_stack is a boolean */
2437 use_stack = 0;
2438
2439 size *= sizeof(unsigned long);
2440
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002441 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2442 sizeof(*entry) + size, flags, pc);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002443 if (!event)
2444 goto out;
2445 entry = ring_buffer_event_data(event);
2446
2447 memset(&entry->caller, 0, size);
2448
2449 if (use_stack)
2450 memcpy(&entry->caller, trace.entries,
2451 trace.nr_entries * sizeof(unsigned long));
2452 else {
2453 trace.max_entries = FTRACE_STACK_ENTRIES;
2454 trace.entries = entry->caller;
2455 if (regs)
2456 save_stack_trace_regs(regs, &trace);
2457 else
2458 save_stack_trace(&trace);
2459 }
2460
2461 entry->size = trace.nr_entries;
2462
Tom Zanussif306cc82013-10-24 08:34:17 -05002463 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002464 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002465
2466 out:
2467 /* Again, don't let gcc optimize things here */
2468 barrier();
Shan Wei82146522012-11-19 13:21:01 +08002469 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002470 preempt_enable_notrace();
2471
Ingo Molnarf0a920d2008-05-12 21:20:47 +02002472}
2473
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002474static inline void ftrace_trace_stack(struct trace_array *tr,
2475 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002476 unsigned long flags,
2477 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05002478{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002479 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05002480 return;
2481
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002482 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05002483}
2484
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002485void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2486 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04002487{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002488 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04002489}
2490
Steven Rostedt03889382009-12-11 09:48:22 -05002491/**
2492 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002493 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05002494 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002495void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05002496{
2497 unsigned long flags;
2498
2499 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05002500 return;
Steven Rostedt03889382009-12-11 09:48:22 -05002501
2502 local_save_flags(flags);
2503
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002504 /*
2505	 * Skip 3 more; that seems to get us to the caller of
2506 * this function.
2507 */
2508 skip += 3;
2509 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2510 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05002511}
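/*
 * Sketch of a call site: drop a backtrace into the trace buffer from a
 * suspect code path, without the noise of a console dump:
 *
 *	trace_dump_stack(0);	// 0: skip no extra helper frames
 */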
2512
Steven Rostedt91e86e52010-11-10 12:56:12 +01002513static DEFINE_PER_CPU(int, user_stack_count);
2514
Steven Rostedte77405a2009-09-02 14:17:06 -04002515void
2516ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02002517{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002518 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02002519 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02002520 struct userstack_entry *entry;
2521 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02002522
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002523 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02002524 return;
2525
Steven Rostedtb6345872010-03-12 20:03:30 -05002526 /*
2527 * NMIs can not handle page faults, even with fix ups.
2528 * The save user stack can (and often does) fault.
2529 */
2530 if (unlikely(in_nmi()))
2531 return;
2532
Steven Rostedt91e86e52010-11-10 12:56:12 +01002533 /*
2534 * prevent recursion, since the user stack tracing may
2535 * trigger other kernel events.
2536 */
2537 preempt_disable();
2538 if (__this_cpu_read(user_stack_count))
2539 goto out;
2540
2541 __this_cpu_inc(user_stack_count);
2542
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002543 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2544 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02002545 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08002546 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02002547 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02002548
Steven Rostedt48659d32009-09-11 11:36:23 -04002549 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02002550 memset(&entry->caller, 0, sizeof(entry->caller));
2551
2552 trace.nr_entries = 0;
2553 trace.max_entries = FTRACE_STACK_ENTRIES;
2554 trace.skip = 0;
2555 trace.entries = entry->caller;
2556
2557 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05002558 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002559 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002560
Li Zefan1dbd1952010-12-09 15:47:56 +08002561 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01002562 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002563 out:
2564 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02002565}
2566
Hannes Eder4fd27352009-02-10 19:44:12 +01002567#ifdef UNUSED
2568static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02002569{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002570 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02002571}
Hannes Eder4fd27352009-02-10 19:44:12 +01002572#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02002573
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002574#endif /* CONFIG_STACKTRACE */
2575
Steven Rostedt07d777f2011-09-22 14:01:55 -04002576/* created for use with alloc_percpu */
2577struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002578 int nesting;
2579 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04002580};
2581
2582static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002583
2584/*
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002585 * This allows for lockless recording. If we're nested too deeply, then
2586 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04002587 */
2588static char *get_trace_buf(void)
2589{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002590 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002591
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002592 if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002593 return NULL;
2594
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002595 return &buffer->buffer[buffer->nesting++][0];
2596}
2597
2598static void put_trace_buf(void)
2599{
2600 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002601}
2602
2603static int alloc_percpu_trace_buffer(void)
2604{
2605 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002606
2607 buffers = alloc_percpu(struct trace_buffer_struct);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002608 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2609 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002610
2611 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002612 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002613}
2614
Steven Rostedt81698832012-10-11 10:15:05 -04002615static int buffers_allocated;
2616
Steven Rostedt07d777f2011-09-22 14:01:55 -04002617void trace_printk_init_buffers(void)
2618{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002619 if (buffers_allocated)
2620 return;
2621
2622 if (alloc_percpu_trace_buffer())
2623 return;
2624
Steven Rostedt2184db42014-05-28 13:14:40 -04002625 /* trace_printk() is for debug use only. Don't use it in production. */
2626
Joe Perchesa395d6a2016-03-22 14:28:09 -07002627 pr_warn("\n");
2628 pr_warn("**********************************************************\n");
2629 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2630 pr_warn("** **\n");
2631 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2632 pr_warn("** **\n");
2633 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2634 pr_warn("** unsafe for production use. **\n");
2635 pr_warn("** **\n");
2636 pr_warn("** If you see this message and you are not debugging **\n");
2637 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2638 pr_warn("** **\n");
2639 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2640 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002641
Steven Rostedtb382ede62012-10-10 21:44:34 -04002642 /* Expand the buffers to set size */
2643 tracing_update_buffers();
2644
Steven Rostedt07d777f2011-09-22 14:01:55 -04002645 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002646
2647 /*
2648 * trace_printk_init_buffers() can be called by modules.
2649 * If that happens, then we need to start cmdline recording
2650 * directly here. If the global_trace.buffer is already
2651 * allocated here, then this was called by module code.
2652 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002653 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002654 tracing_start_cmdline_record();
2655}
2656
2657void trace_printk_start_comm(void)
2658{
2659 /* Start tracing comms if trace printk is set */
2660 if (!buffers_allocated)
2661 return;
2662 tracing_start_cmdline_record();
2663}
2664
2665static void trace_printk_start_stop_comm(int enabled)
2666{
2667 if (!buffers_allocated)
2668 return;
2669
2670 if (enabled)
2671 tracing_start_cmdline_record();
2672 else
2673 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002674}
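/*
 * Sketch of the usual producer of these buffers, the trace_printk()
 * macro, callable from almost any kernel context:
 *
 *	trace_printk("queue depth %d on cpu %d\n",
 *		     depth, smp_processor_id());
 *
 * Having any such call built in (or loading a module that contains
 * one) is what ends up in trace_printk_init_buffers() and prints the
 * banner above.
 */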
2675
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002676/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002677 * trace_vbprintk - write binary msg to tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002678 *
2679 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					    flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	}

out:
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
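
/*
 * Illustrative usage sketch (not part of the original source): this is
 * the backend of the binary trace_printk() path. With a constant format
 * string, a call such as
 *
 *	trace_printk("read %d bytes from %s\n", len, name);
 *
 * is routed here; only the format pointer and the binary arguments are
 * stored in the ring buffer, and the string is rendered at read time.
 */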

static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len + 1);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
	}

out:
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}

int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
}

int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}
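
/*
 * Illustrative usage sketch (not part of the original source): code that
 * holds a trace_array pointer for an instance can log straight into that
 * instance's buffer, e.g.:
 *
 *	trace_array_printk(tr, _THIS_IP_, "widget reset: %d\n", count);
 *
 * _THIS_IP_ records the caller's address the way trace_printk() does;
 * "widget reset" and count are made-up placeholders.
 */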

int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);

static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_read(buf_iter, NULL);
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
					 lost_events);

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother iterating
	 * over all CPUs; peek at that one directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}
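
/*
 * Illustrative note (not part of the original source): __find_next_entry()
 * is effectively an N-way merge by timestamp. If CPU0's next event carries
 * ts=200 and CPU1's carries ts=150, the CPU1 event is returned first, so
 * the combined trace reads out in global time order even though each
 * per-cpu ring buffer is only ordered locally.
 */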

/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * With the max latency tracers, a reset may never have taken
	 * place on a cpu. This is evident from the timestamp being
	 * before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}
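
/*
 * Illustrative note (not part of the original source): the entries skipped
 * above are remembered in skipped_entries so that get_total_entries(),
 * further down, can subtract them and report counts that match what the
 * iterator will actually show.
 */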

/*
 * The current tracer is copied to avoid global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * Copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace; the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}

static void
get_total_entries(struct trace_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long count;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(buf->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
			*total += count +
				ring_buffer_overrun_cpu(buf->buffer, cpu);
		*entries += count;
	}
}

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n"
		    "#                 / _-----=> irqs-off        \n"
		    "#                | / _----=> need-resched    \n"
		    "#                || / _---=> hardirq/softirq \n"
		    "#                ||| / _--=> preempt-depth   \n"
		    "#                |||| /     delay            \n"
		    "#  cmd     pid   ||||| time  |   caller      \n"
		    "#     \\   /      |||||  \\    |   /         \n");
}

static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}

static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
		    "#              | |       |          |         |\n");
}

static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n"
		    "#                             / _----=> need-resched\n"
		    "#                            | / _---=> hardirq/softirq\n"
		    "#                            || / _--=> preempt-depth\n"
		    "#                            ||| /     delay\n"
		    "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
		    "#              | |       |   ||||       |         |\n");
}

void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	if (iter->started)
		cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD(s, entry->pid);
		SEQ_PUT_FIELD(s, iter->cpu);
		SEQ_PUT_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}

int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;
	enum print_line_t ret;

	if (iter->lost_events) {
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}

void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer, m);
			else
				print_func_help_header(iter->trace_buffer, m);
		}
	}
}

static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
		    "#          MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "#                      (Doesn't have to be '2'; works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "#                      (Doesn't have to be '2'; works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

/*
 * Should be used after trace_array_get(); trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}
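
/*
 * Illustrative note (not part of the original source): the per-cpu file
 * creation code stores (cpu + 1) in i_cdev, so a NULL i_cdev unambiguously
 * means "not a per-cpu file" and decodes to RING_BUFFER_ALL_CPUS, while a
 * stored value of 1 decodes back to cpu 0.
 */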

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return (tracing_disabled) ? true : false;
}

/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}

static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}
3776
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003777static int tracing_open(struct inode *inode, struct file *file)
3778{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003779 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003780 struct trace_iterator *iter;
3781 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003782
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003783 if (trace_array_get(tr) < 0)
3784 return -ENODEV;
3785
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003786 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003787 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3788 int cpu = tracing_get_cpu(inode);
3789
3790 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003791 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003792 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003793 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003794 }
3795
3796 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003797 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003798 if (IS_ERR(iter))
3799 ret = PTR_ERR(iter);
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003800 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003801 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3802 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003803
3804 if (ret < 0)
3805 trace_array_put(tr);
3806
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003807 return ret;
3808}
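/*
 * Editor's illustrative sketch, not part of the original file: the
 * O_TRUNC branch above is what makes the shell idiom "echo > trace"
 * clear the buffer, since the redirection opens the file with
 * O_WRONLY|O_TRUNC.  A minimal user-space equivalent (assuming tracefs
 * is reachable at the usual debugfs path):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/debug/tracing/trace",
 *			      O_WRONLY | O_TRUNC);	// erases every CPU buffer
 *
 *		if (fd < 0)
 *			return 1;
 *		return close(fd);
 *	}
 *
 * Opening a per-CPU trace file the same way resets only that CPU, per
 * the tracing_get_cpu() branch above.
 */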
3809
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003810/*
3811 * Some tracers are not suitable for instance buffers.
3812 * A tracer is always available for the global array (top level)
3813 * or if it explicitly states that it is.
3814 */
3815static bool
3816trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3817{
3818 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3819}
3820
3821/* Find the next tracer that this trace array may use */
3822static struct tracer *
3823get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3824{
3825 while (t && !trace_ok_for_array(t, tr))
3826 t = t->next;
3827
3828 return t;
3829}
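/*
 * Editor's note (sketch, not in the original source): the two helpers
 * above are a predicate-filtered walk of the singly linked tracer list.
 * Stripped of the tracing types, with hypothetical names, the pattern is:
 *
 *	struct node {
 *		struct node *next;
 *		int flags;
 *	};
 *
 *	static bool node_ok(const struct node *n)
 *	{
 *		return n->flags != 0;		// stand-in predicate
 *	}
 *
 *	static struct node *next_ok(struct node *n)
 *	{
 *		while (n && !node_ok(n))	// skip unsuitable nodes
 *			n = n->next;
 *		return n;
 *	}
 *
 * get_tracer_for_array() is next_ok() with trace_ok_for_array() as the
 * predicate, so t_start()/t_next() below never yield a tracer that the
 * instance is not allowed to use.
 */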
3830
Ingo Molnare309b412008-05-12 21:20:51 +02003831static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003832t_next(struct seq_file *m, void *v, loff_t *pos)
3833{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003834 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003835 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003836
3837 (*pos)++;
3838
3839 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003840 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003841
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003842 return t;
3843}
3844
3845static void *t_start(struct seq_file *m, loff_t *pos)
3846{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003847 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003848 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003849 loff_t l = 0;
3850
3851 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003852
3853 t = get_tracer_for_array(tr, trace_types);
3854 for (; t && l < *pos; t = t_next(m, t, &l))
3855 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003856
3857 return t;
3858}
3859
3860static void t_stop(struct seq_file *m, void *p)
3861{
3862 mutex_unlock(&trace_types_lock);
3863}
3864
3865static int t_show(struct seq_file *m, void *v)
3866{
3867 struct tracer *t = v;
3868
3869 if (!t)
3870 return 0;
3871
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003872 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003873 if (t->next)
3874 seq_putc(m, ' ');
3875 else
3876 seq_putc(m, '\n');
3877
3878 return 0;
3879}
3880
James Morris88e9d342009-09-22 16:43:43 -07003881static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003882 .start = t_start,
3883 .next = t_next,
3884 .stop = t_stop,
3885 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003886};
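/*
 * Editor's sketch (assumed minimal example, not from this file): the
 * seq_file contract used by the table above.  start() positions the
 * cursor and takes any needed lock, next() advances it, show() emits one
 * record, stop() undoes start().  Over a static array it reduces to:
 *
 *	static const char *items[] = { "foo", "bar" };
 *
 *	static void *ex_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return *pos < ARRAY_SIZE(items) ? &items[*pos] : NULL;
 *	}
 *
 *	static void *ex_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		(*pos)++;
 *		return ex_start(m, pos);
 *	}
 *
 *	static void ex_stop(struct seq_file *m, void *v) { }
 *
 *	static int ex_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", *(const char **)v);
 *		return 0;
 *	}
 *
 * show_traces_open() below then only needs seq_open() plus stashing the
 * trace_array in m->private.
 */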
3887
3888static int show_traces_open(struct inode *inode, struct file *file)
3889{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003890 struct trace_array *tr = inode->i_private;
3891 struct seq_file *m;
3892 int ret;
3893
Steven Rostedt60a11772008-05-12 21:20:44 +02003894 if (tracing_disabled)
3895 return -ENODEV;
3896
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003897 ret = seq_open(file, &show_traces_seq_ops);
3898 if (ret)
3899 return ret;
3900
3901 m = file->private_data;
3902 m->private = tr;
3903
3904 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003905}
3906
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003907static ssize_t
3908tracing_write_stub(struct file *filp, const char __user *ubuf,
3909 size_t count, loff_t *ppos)
3910{
3911 return count;
3912}
3913
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003914loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003915{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003916 int ret;
3917
Slava Pestov364829b2010-11-24 15:13:16 -08003918 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003919 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003920 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003921 file->f_pos = ret = 0;
3922
3923 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003924}
3925
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003926static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003927 .open = tracing_open,
3928 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003929 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003930 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003931 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003932};
3933
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003934static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003935 .open = show_traces_open,
3936 .read = seq_read,
3937 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003938 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003939};
3940
Ingo Molnar36dfe922008-05-12 21:20:52 +02003941/*
Ingo Molnar36dfe922008-05-12 21:20:52 +02003942 * The tracer itself will not take this lock, but we still want
3943 * to provide a consistent cpumask to user-space:
3944 */
3945static DEFINE_MUTEX(tracing_cpumask_update_lock);
3946
3947/*
3948 * Temporary storage for the character representation of the
3949 * CPU bitmask (and one more byte for the newline):
3950 */
3951static char mask_str[NR_CPUS + 1];
3952
Ingo Molnarc7078de2008-05-12 21:20:52 +02003953static ssize_t
3954tracing_cpumask_read(struct file *filp, char __user *ubuf,
3955 size_t count, loff_t *ppos)
3956{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003957 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003958 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003959
3960 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003961
Tejun Heo1a402432015-02-13 14:37:39 -08003962 len = snprintf(mask_str, count, "%*pb\n",
3963 cpumask_pr_args(tr->tracing_cpumask));
3964 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003965 count = -EINVAL;
3966 goto out_err;
3967 }
Ingo Molnar36dfe922008-05-12 21:20:52 +02003968 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3969
3970out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003971 mutex_unlock(&tracing_cpumask_update_lock);
3972
3973 return count;
3974}
3975
3976static ssize_t
3977tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3978 size_t count, loff_t *ppos)
3979{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003980 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303981 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003982 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303983
3984 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3985 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003986
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303987 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003988 if (err)
3989 goto err_unlock;
3990
Li Zefan215368e2009-06-15 10:56:42 +08003991 mutex_lock(&tracing_cpumask_update_lock);
3992
Steven Rostedta5e25882008-12-02 15:34:05 -05003993 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003994 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02003995 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003996 /*
3997 * Increase/decrease the disabled counter if we are
3998 * about to flip a bit in the cpumask:
3999 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004000 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304001 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004002 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4003 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004004 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004005 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304006 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004007 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4008 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004009 }
4010 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004011 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05004012 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02004013
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004014 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004015
Ingo Molnarc7078de2008-05-12 21:20:52 +02004016 mutex_unlock(&tracing_cpumask_update_lock);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304017 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004018
Ingo Molnarc7078de2008-05-12 21:20:52 +02004019 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004020
4021err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08004022 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004023
4024 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004025}
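/*
 * Editor's sketch (not part of the original file; assumes tracefs under
 * /sys/kernel/debug/tracing): driving the cpumask file above from user
 * space.  cpumask_parse_user() takes a hex CPU mask, the same format the
 * read side prints via "%*pb".
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/debug/tracing/tracing_cpumask",
 *			      O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		// hex mask 0x3 = CPUs 0 and 1; all other CPUs stop tracing
 *		if (write(fd, "3", 1) != 1)
 *			return 1;
 *		return close(fd);
 *	}
 */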
4026
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004027static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004028 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004029 .read = tracing_cpumask_read,
4030 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004031 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004032 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004033};
4034
Li Zefanfdb372e2009-12-08 11:15:59 +08004035static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004036{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004037 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004038 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004039 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004040 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004041
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004042 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004043 tracer_flags = tr->current_trace->flags->val;
4044 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004045
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004046 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004047 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08004048 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004049 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004050 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004051 }
4052
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004053 for (i = 0; trace_opts[i].name; i++) {
4054 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08004055 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004056 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004057 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004058 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004059 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004060
Li Zefanfdb372e2009-12-08 11:15:59 +08004061 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004062}
4063
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004064static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08004065 struct tracer_flags *tracer_flags,
4066 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004067{
Chunyu Hud39cdd22016-03-08 21:37:01 +08004068 struct tracer *trace = tracer_flags->trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004069 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004070
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004071 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004072 if (ret)
4073 return ret;
4074
4075 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08004076 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004077 else
Zhaolei77708412009-08-07 18:53:21 +08004078 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004079 return 0;
4080}
4081
Li Zefan8d18eaa2009-12-08 11:17:06 +08004082/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004083static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08004084{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004085 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004086 struct tracer_flags *tracer_flags = trace->flags;
4087 struct tracer_opt *opts = NULL;
4088 int i;
4089
4090 for (i = 0; tracer_flags->opts[i].name; i++) {
4091 opts = &tracer_flags->opts[i];
4092
4093 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004094 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08004095 }
4096
4097 return -EINVAL;
4098}
4099
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004100/* Some tracers require overwrite to stay enabled */
4101int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4102{
4103 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4104 return -1;
4105
4106 return 0;
4107}
4108
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004109int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004110{
4111 /* do nothing if flag is already set */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004112 if (!!(tr->trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004113 return 0;
4114
4115 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004116 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05004117 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004118 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004119
4120 if (enabled)
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004121 tr->trace_flags |= mask;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004122 else
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004123 tr->trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08004124
4125 if (mask == TRACE_ITER_RECORD_CMD)
4126 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08004127
Steven Rostedtc37775d2016-04-13 16:59:18 -04004128 if (mask == TRACE_ITER_EVENT_FORK)
4129 trace_event_follow_fork(tr, enabled);
4130
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004131 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004132 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004133#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004134 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004135#endif
4136 }
Steven Rostedt81698832012-10-11 10:15:05 -04004137
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004138 if (mask == TRACE_ITER_PRINTK) {
Steven Rostedt81698832012-10-11 10:15:05 -04004139 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004140 trace_printk_control(enabled);
4141 }
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004142
4143 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004144}
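/*
 * Editor's note (reduced sketch, not in the original source): the core of
 * set_tracer_flag() is the set/clear-by-mask idiom with a normalized
 * early-out:
 *
 *	static unsigned int update_mask(unsigned int flags,
 *					unsigned int mask, int enabled)
 *	{
 *		if (!!(flags & mask) == !!enabled)
 *			return flags;			// already as requested
 *		return enabled ? flags | mask : flags & ~mask;
 *	}
 *
 * The double negation folds both sides to 0 or 1, so a multi-bit mask
 * test compares correctly against any non-zero "enabled" value.
 */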
4145
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004146static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004147{
Li Zefan8d18eaa2009-12-08 11:17:06 +08004148 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004149 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004150 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004151 int i;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004152 size_t orig_len = strlen(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004153
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004154 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004155
Li Zefan8d18eaa2009-12-08 11:17:06 +08004156 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004157 neg = 1;
4158 cmp += 2;
4159 }
4160
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004161 mutex_lock(&trace_types_lock);
4162
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004163 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08004164 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004165 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004166 break;
4167 }
4168 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004169
4170 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004171 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004172 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004173
4174 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004175
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004176 /*
4177 * If the first trailing whitespace is replaced with '\0' by strstrip,
4178 * turn it back into a space.
4179 */
4180 if (orig_len > strlen(option))
4181 option[strlen(option)] = ' ';
4182
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004183 return ret;
4184}
4185
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004186static void __init apply_trace_boot_options(void)
4187{
4188 char *buf = trace_boot_options_buf;
4189 char *option;
4190
4191 while (true) {
4192 option = strsep(&buf, ",");
4193
4194 if (!option)
4195 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004196
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05004197 if (*option)
4198 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004199
4200 /* Put back the comma to allow this to be called again */
4201 if (buf)
4202 *(buf - 1) = ',';
4203 }
4204}
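/*
 * Editor's sketch (stand-alone user-space demo with made-up contents, not
 * from this file): how the strsep() loop above splits the boot string.
 * strsep() overwrites each ',' with '\0' and advances the cursor, which
 * is why the comma is written back so the buffer survives a second pass.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		char buf[] = "sym-addr,trace_printk,stacktrace";
 *		char *cur = buf, *opt;
 *
 *		while ((opt = strsep(&cur, ",")) != NULL) {
 *			if (*opt)
 *				printf("option: %s\n", opt);
 *			if (cur)
 *				cur[-1] = ',';	// restore the separator
 *		}
 *		return 0;
 *	}
 */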
4205
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004206static ssize_t
4207tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4208 size_t cnt, loff_t *ppos)
4209{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004210 struct seq_file *m = filp->private_data;
4211 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004212 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004213 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004214
4215 if (cnt >= sizeof(buf))
4216 return -EINVAL;
4217
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08004218 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004219 return -EFAULT;
4220
Steven Rostedta8dd2172013-01-09 20:54:17 -05004221 buf[cnt] = 0;
4222
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004223 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004224 if (ret < 0)
4225 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004226
Jiri Olsacf8517c2009-10-23 19:36:16 -04004227 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004228
4229 return cnt;
4230}
4231
Li Zefanfdb372e2009-12-08 11:15:59 +08004232static int tracing_trace_options_open(struct inode *inode, struct file *file)
4233{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004234 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004235 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004236
Li Zefanfdb372e2009-12-08 11:15:59 +08004237 if (tracing_disabled)
4238 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004239
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004240 if (trace_array_get(tr) < 0)
4241 return -ENODEV;
4242
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004243 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4244 if (ret < 0)
4245 trace_array_put(tr);
4246
4247 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08004248}
4249
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004250static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08004251 .open = tracing_trace_options_open,
4252 .read = seq_read,
4253 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004254 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05004255 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004256};
4257
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004258static const char readme_msg[] =
4259 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004260 "# echo 0 > tracing_on : quick way to disable tracing\n"
4261 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4262 " Important files:\n"
4263 " trace\t\t\t- The static contents of the buffer\n"
4264 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4265 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4266 " current_tracer\t- function and latency tracers\n"
4267 " available_tracers\t- list of configured tracers for current_tracer\n"
4268 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4269 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4270 " trace_clock\t\t-change the clock used to order events\n"
4271 " local: Per cpu clock but may not be synced across CPUs\n"
4272 " global: Synced across CPUs but slows tracing down.\n"
4273 " counter: Not a clock, but just an increment\n"
4274 " uptime: Jiffy counter from time of boot\n"
4275 " perf: Same clock that perf events use\n"
4276#ifdef CONFIG_X86_64
4277 " x86-tsc: TSC cycle counter\n"
4278#endif
4279 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
Steven Rostedtfa32e852016-07-06 15:25:08 -04004280 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004281 " tracing_cpumask\t- Limit which CPUs to trace\n"
4282 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4283 "\t\t\t Remove sub-buffer with rmdir\n"
4284 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004285 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4286 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004287 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004288#ifdef CONFIG_DYNAMIC_FTRACE
4289 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004290 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4291 "\t\t\t functions\n"
Masami Hiramatsu60f1d5e2016-10-05 20:58:15 +09004292 "\t accepts: func_full_name or glob-matching-pattern\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004293 "\t modules: Can select a group via module\n"
4294 "\t Format: :mod:<module-name>\n"
4295 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4296 "\t triggers: a command to perform when function is hit\n"
4297 "\t Format: <function>:<trigger>[:count]\n"
4298 "\t trigger: traceon, traceoff\n"
4299 "\t\t enable_event:<system>:<event>\n"
4300 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004301#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004302 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004303#endif
4304#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004305 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004306#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04004307 "\t\t dump\n"
4308 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004309 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4310 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4311 "\t The first one will disable tracing every time do_fault is hit\n"
4312 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4313 "\t The first time do trap is hit and it disables tracing, the\n"
4314 "\t counter will decrement to 2. If tracing is already disabled,\n"
4315 "\t the counter will not decrement. It only decrements when the\n"
4316 "\t trigger did work\n"
4317 "\t To remove trigger without count:\n"
4318 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4319 "\t To remove trigger with a count:\n"
4320 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004321 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004322 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4323 "\t modules: Can select a group via module command :mod:\n"
4324 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004325#endif /* CONFIG_DYNAMIC_FTRACE */
4326#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004327 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4328 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004329#endif
4330#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4331 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09004332 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004333 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4334#endif
4335#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004336 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4337 "\t\t\t snapshot buffer. Read the contents for more\n"
4338 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004339#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004340#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004341 " stack_trace\t\t- Shows the max stack trace when active\n"
4342 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004343 "\t\t\t Write into this file to reset the max size (trigger a\n"
4344 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004345#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004346 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4347 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004348#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004349#endif /* CONFIG_STACK_TRACER */
Masami Hiramatsu86425622016-08-18 17:58:15 +09004350#ifdef CONFIG_KPROBE_EVENT
4351 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4352 "\t\t\t Write into this file to define/undefine new trace events.\n"
4353#endif
4354#ifdef CONFIG_UPROBE_EVENT
4355 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4356 "\t\t\t Write into this file to define/undefine new trace events.\n"
4357#endif
4358#if defined(CONFIG_KPROBE_EVENT) || defined(CONFIG_UPROBE_EVENT)
4359 "\t accepts: event-definitions (one definition per line)\n"
4360 "\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
4361 "\t -:[<group>/]<event>\n"
4362#ifdef CONFIG_KPROBE_EVENT
4363 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4364#endif
4365#ifdef CONFIG_UPROBE_EVENT
4366 "\t place: <path>:<offset>\n"
4367#endif
4368 "\t args: <name>=fetcharg[:type]\n"
4369 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4370 "\t $stack<index>, $stack, $retval, $comm\n"
4371 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4372 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4373#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06004374 " events/\t\t- Directory containing all trace event subsystems:\n"
4375 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4376 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004377 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4378 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004379 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004380 " events/<system>/<event>/\t- Directory containing control files for\n"
4381 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004382 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4383 " filter\t\t- If set, only events passing filter are traced\n"
4384 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004385 "\t Format: <trigger>[:count][if <filter>]\n"
4386 "\t trigger: traceon, traceoff\n"
4387 "\t enable_event:<system>:<event>\n"
4388 "\t disable_event:<system>:<event>\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06004389#ifdef CONFIG_HIST_TRIGGERS
4390 "\t enable_hist:<system>:<event>\n"
4391 "\t disable_hist:<system>:<event>\n"
4392#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06004393#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004394 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004395#endif
4396#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004397 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004398#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004399#ifdef CONFIG_HIST_TRIGGERS
4400 "\t\t hist (see below)\n"
4401#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004402 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4403 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4404 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4405 "\t events/block/block_unplug/trigger\n"
4406 "\t The first disables tracing every time block_unplug is hit.\n"
4407 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4408 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4409 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4410 "\t Like function triggers, the counter is only decremented if it\n"
4411 "\t enabled or disabled tracing.\n"
4412 "\t To remove a trigger without a count:\n"
4413 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4414 "\t To remove a trigger with a count:\n"
4415 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4416 "\t Filters can be ignored when removing a trigger.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004417#ifdef CONFIG_HIST_TRIGGERS
4418 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
Tom Zanussi76a3b0c2016-03-03 12:54:44 -06004419 "\t Format: hist:keys=<field1[,field2,...]>\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004420 "\t [:values=<field1[,field2,...]>]\n"
Tom Zanussie62347d2016-03-03 12:54:45 -06004421 "\t [:sort=<field1[,field2,...]>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004422 "\t [:size=#entries]\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06004423 "\t [:pause][:continue][:clear]\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004424 "\t [:name=histname1]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004425 "\t [if <filter>]\n\n"
4426 "\t When a matching event is hit, an entry is added to a hash\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004427 "\t table using the key(s) and value(s) named, and the value of a\n"
4428 "\t sum called 'hitcount' is incremented. Keys and values\n"
4429 "\t correspond to fields in the event's format description. Keys\n"
Tom Zanussi69a02002016-03-03 12:54:52 -06004430 "\t can be any field, or the special string 'stacktrace'.\n"
4431 "\t Compound keys consisting of up to two fields can be specified\n"
4432 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4433 "\t fields. Sort keys consisting of up to two fields can be\n"
4434 "\t specified using the 'sort' keyword. The sort direction can\n"
4435 "\t be modified by appending '.descending' or '.ascending' to a\n"
4436 "\t sort field. The 'size' parameter can be used to specify more\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004437 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4438 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4439 "\t its histogram data will be shared with other triggers of the\n"
4440 "\t same name, and trigger hits will update this common data.\n\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004441 "\t Reading the 'hist' file for the event will dump the hash\n"
Tom Zanussi52a7f162016-03-03 12:54:57 -06004442 "\t table in its entirety to stdout. If there are multiple hist\n"
4443 "\t triggers attached to an event, there will be a table for each\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004444 "\t trigger in the output. The table displayed for a named\n"
4445 "\t trigger will be the same as any other instance having the\n"
4446 "\t same name. The default format used to display a given field\n"
4447 "\t can be modified by appending any of the following modifiers\n"
4448 "\t to the field name, as applicable:\n\n"
Tom Zanussic6afad42016-03-03 12:54:49 -06004449 "\t .hex display a number as a hex value\n"
4450 "\t .sym display an address as a symbol\n"
Tom Zanussi6b4827a2016-03-03 12:54:50 -06004451 "\t .sym-offset display an address as a symbol and offset\n"
Tom Zanussi31696192016-03-03 12:54:51 -06004452 "\t .execname display a common_pid as a program name\n"
4453 "\t .syscall display a syscall id as a syscall name\n\n"
Namhyung Kim4b94f5b2016-03-03 12:55:02 -06004454 "\t .log2 display log2 value rather than raw number\n\n"
Tom Zanussi83e99912016-03-03 12:54:46 -06004455 "\t The 'pause' parameter can be used to pause an existing hist\n"
4456 "\t trigger or to start a hist trigger but not log any events\n"
4457 "\t until told to do so. 'continue' can be used to start or\n"
4458 "\t restart a paused hist trigger.\n\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06004459 "\t The 'clear' parameter will clear the contents of a running\n"
4460 "\t hist trigger and leave its current paused/active state\n"
4461 "\t unchanged.\n\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06004462 "\t The enable_hist and disable_hist triggers can be used to\n"
4463 "\t have one event conditionally start and stop another event's\n"
4464 "\t already-attached hist trigger. The syntax is analagous to\n"
4465 "\t the enable_event and disable_event triggers.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004466#endif
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004467;
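/*
 * Editor's sketch (not part of the original file; assumes tracefs is
 * mounted and the block_unplug event from the README text exists on the
 * running kernel): installing and removing one documented trigger from C
 * rather than the shell.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static int write_str(const char *path, const char *s)
 *	{
 *		int fd = open(path, O_WRONLY);
 *		ssize_t n;
 *
 *		if (fd < 0)
 *			return -1;
 *		n = write(fd, s, strlen(s));
 *		close(fd);
 *		return n < 0 ? -1 : 0;
 *	}
 *
 *	int main(void)
 *	{
 *		const char *t = "/sys/kernel/debug/tracing/events/"
 *				"block/block_unplug/trigger";
 *
 *		write_str(t, "traceoff:3");	// stop tracing, 3 hits max
 *		write_str(t, "!traceoff:0");	// remove; count is ignored
 *		return 0;
 *	}
 */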
4468
4469static ssize_t
4470tracing_readme_read(struct file *filp, char __user *ubuf,
4471 size_t cnt, loff_t *ppos)
4472{
4473 return simple_read_from_buffer(ubuf, cnt, ppos,
4474 readme_msg, strlen(readme_msg));
4475}
4476
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004477static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02004478 .open = tracing_open_generic,
4479 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004480 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004481};
4482
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004483static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04004484{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004485 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004486
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004487 if (*pos || m->count)
4488 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004489
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004490 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004491
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004492 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4493 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004494 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04004495 continue;
4496
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004497 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004498 }
4499
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004500 return NULL;
4501}
Avadh Patel69abe6a2009-04-10 16:04:48 -04004502
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004503static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4504{
4505 void *v;
4506 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004507
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004508 preempt_disable();
4509 arch_spin_lock(&trace_cmdline_lock);
4510
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004511 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004512 while (l <= *pos) {
4513 v = saved_cmdlines_next(m, v, &l);
4514 if (!v)
4515 return NULL;
4516 }
4517
4518 return v;
4519}
4520
4521static void saved_cmdlines_stop(struct seq_file *m, void *v)
4522{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004523 arch_spin_unlock(&trace_cmdline_lock);
4524 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004525}
4526
4527static int saved_cmdlines_show(struct seq_file *m, void *v)
4528{
4529 char buf[TASK_COMM_LEN];
4530 unsigned int *pid = v;
4531
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004532 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004533 seq_printf(m, "%d %s\n", *pid, buf);
4534 return 0;
4535}
4536
4537static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4538 .start = saved_cmdlines_start,
4539 .next = saved_cmdlines_next,
4540 .stop = saved_cmdlines_stop,
4541 .show = saved_cmdlines_show,
4542};
4543
4544static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4545{
4546 if (tracing_disabled)
4547 return -ENODEV;
4548
4549 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04004550}
4551
4552static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004553 .open = tracing_saved_cmdlines_open,
4554 .read = seq_read,
4555 .llseek = seq_lseek,
4556 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04004557};
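/*
 * Editor's sketch (illustrative, assumes the usual tracefs mount point):
 * thanks to the seq_file plumbing above, saved_cmdlines reads as plain
 * "<pid> <comm>" lines.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[64];
 *		FILE *f = fopen("/sys/kernel/debug/tracing/saved_cmdlines",
 *				"r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);	// e.g. "1 systemd"
 *		return fclose(f);
 *	}
 */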
4558
4559static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004560tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4561 size_t cnt, loff_t *ppos)
4562{
4563 char buf[64];
4564 int r;
4565
4566 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09004567 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004568 arch_spin_unlock(&trace_cmdline_lock);
4569
4570 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4571}
4572
4573static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4574{
4575 kfree(s->saved_cmdlines);
4576 kfree(s->map_cmdline_to_pid);
4577 kfree(s);
4578}
4579
4580static int tracing_resize_saved_cmdlines(unsigned int val)
4581{
4582 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4583
Namhyung Kima6af8fb2014-06-10 16:11:35 +09004584 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004585 if (!s)
4586 return -ENOMEM;
4587
4588 if (allocate_cmdlines_buffer(val, s) < 0) {
4589 kfree(s);
4590 return -ENOMEM;
4591 }
4592
4593 arch_spin_lock(&trace_cmdline_lock);
4594 savedcmd_temp = savedcmd;
4595 savedcmd = s;
4596 arch_spin_unlock(&trace_cmdline_lock);
4597 free_saved_cmdlines_buffer(savedcmd_temp);
4598
4599 return 0;
4600}
4601
4602static ssize_t
4603tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4604 size_t cnt, loff_t *ppos)
4605{
4606 unsigned long val;
4607 int ret;
4608
4609 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4610 if (ret)
4611 return ret;
4612
4613 /* must have at least 1 entry and no more than PID_MAX_DEFAULT */
4614 if (!val || val > PID_MAX_DEFAULT)
4615 return -EINVAL;
4616
4617 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4618 if (ret < 0)
4619 return ret;
4620
4621 *ppos += cnt;
4622
4623 return cnt;
4624}
4625
4626static const struct file_operations tracing_saved_cmdlines_size_fops = {
4627 .open = tracing_open_generic,
4628 .read = tracing_saved_cmdlines_size_read,
4629 .write = tracing_saved_cmdlines_size_write,
4630};
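/*
 * Editor's sketch (illustrative, not in the original file): resizing the
 * cmdline cache through the file defined above.  The write handler
 * rejects 0 and anything above PID_MAX_DEFAULT.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/debug/tracing/"
 *			      "saved_cmdlines_size", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		// remember 1024 comms instead of the boot-time default
 *		if (write(fd, "1024", 4) != 4)
 *			return 1;
 *		return close(fd);
 *	}
 */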
4631
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004632#ifdef CONFIG_TRACE_ENUM_MAP_FILE
4633static union trace_enum_map_item *
4634update_enum_map(union trace_enum_map_item *ptr)
4635{
4636 if (!ptr->map.enum_string) {
4637 if (ptr->tail.next) {
4638 ptr = ptr->tail.next;
4639 /* Set ptr to the next real item (skip head) */
4640 ptr++;
4641 } else
4642 return NULL;
4643 }
4644 return ptr;
4645}
4646
4647static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4648{
4649 union trace_enum_map_item *ptr = v;
4650
4651 /*
4652 * Paranoid! If ptr points to end, we don't want to increment past it.
4653 * This really should never happen.
4654 */
4655 ptr = update_enum_map(ptr);
4656 if (WARN_ON_ONCE(!ptr))
4657 return NULL;
4658
4659 ptr++;
4660
4661 (*pos)++;
4662
4663 ptr = update_enum_map(ptr);
4664
4665 return ptr;
4666}
4667
4668static void *enum_map_start(struct seq_file *m, loff_t *pos)
4669{
4670 union trace_enum_map_item *v;
4671 loff_t l = 0;
4672
4673 mutex_lock(&trace_enum_mutex);
4674
4675 v = trace_enum_maps;
4676 if (v)
4677 v++;
4678
4679 while (v && l < *pos) {
4680 v = enum_map_next(m, v, &l);
4681 }
4682
4683 return v;
4684}
4685
4686static void enum_map_stop(struct seq_file *m, void *v)
4687{
4688 mutex_unlock(&trace_enum_mutex);
4689}
4690
4691static int enum_map_show(struct seq_file *m, void *v)
4692{
4693 union trace_enum_map_item *ptr = v;
4694
4695 seq_printf(m, "%s %ld (%s)\n",
4696 ptr->map.enum_string, ptr->map.enum_value,
4697 ptr->map.system);
4698
4699 return 0;
4700}
4701
4702static const struct seq_operations tracing_enum_map_seq_ops = {
4703 .start = enum_map_start,
4704 .next = enum_map_next,
4705 .stop = enum_map_stop,
4706 .show = enum_map_show,
4707};
4708
4709static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4710{
4711 if (tracing_disabled)
4712 return -ENODEV;
4713
4714 return seq_open(filp, &tracing_enum_map_seq_ops);
4715}
4716
4717static const struct file_operations tracing_enum_map_fops = {
4718 .open = tracing_enum_map_open,
4719 .read = seq_read,
4720 .llseek = seq_lseek,
4721 .release = seq_release,
4722};
4723
4724static inline union trace_enum_map_item *
4725trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4726{
4727 /* Return tail of array given the head */
4728 return ptr + ptr->head.length + 1;
4729}
4730
4731static void
4732trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4733 int len)
4734{
4735 struct trace_enum_map **stop;
4736 struct trace_enum_map **map;
4737 union trace_enum_map_item *map_array;
4738 union trace_enum_map_item *ptr;
4739
4740 stop = start + len;
4741
4742 /*
4743 * The trace_enum_maps contains the map plus a head and tail item,
4744 * where the head holds the module and length of array, and the
4745 * tail holds a pointer to the next list.
4746 */
4747 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4748 if (!map_array) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07004749 pr_warn("Unable to allocate trace enum mapping\n");
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004750 return;
4751 }
4752
4753 mutex_lock(&trace_enum_mutex);
4754
4755 if (!trace_enum_maps)
4756 trace_enum_maps = map_array;
4757 else {
4758 ptr = trace_enum_maps;
4759 for (;;) {
4760 ptr = trace_enum_jmp_to_tail(ptr);
4761 if (!ptr->tail.next)
4762 break;
4763 ptr = ptr->tail.next;
4765 }
4766 ptr->tail.next = map_array;
4767 }
4768 map_array->head.mod = mod;
4769 map_array->head.length = len;
4770 map_array++;
4771
4772 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4773 map_array->map = **map;
4774 map_array++;
4775 }
4776 memset(map_array, 0, sizeof(*map_array));
4777
4778 mutex_unlock(&trace_enum_mutex);
4779}
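/*
 * Editor's sketch (not in the original source; relies only on the layout
 * described in the comment above): each per-module array is
 *
 *	[ head | map 0 | map 1 | ... | map len-1 | tail ]
 *
 * and the tail's zeroed enum_string acts as a sentinel.  A full walk over
 * all chained arrays therefore looks like:
 *
 *	union trace_enum_map_item *ptr = trace_enum_maps;
 *
 *	while (ptr) {
 *		union trace_enum_map_item *p = ptr + 1;	// skip head
 *
 *		while (p->map.enum_string)	// maps until the tail
 *			p++;
 *		ptr = p->tail.next;		// head of the next array
 *	}
 *
 * The final memset() above is what creates that sentinel: a zeroed item
 * has a NULL enum_string, which update_enum_map() uses to hop to the
 * next module's array.
 */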
4780
4781static void trace_create_enum_file(struct dentry *d_tracer)
4782{
4783 trace_create_file("enum_map", 0444, d_tracer,
4784 NULL, &tracing_enum_map_fops);
4785}
4786
4787#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4788static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4789static inline void trace_insert_enum_map_file(struct module *mod,
4790 struct trace_enum_map **start, int len) { }
4791#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4792
4793static void trace_insert_enum_map(struct module *mod,
4794 struct trace_enum_map **start, int len)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04004795{
4796 struct trace_enum_map **map;
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04004797
4798 if (len <= 0)
4799 return;
4800
4801 map = start;
4802
4803 trace_event_enum_update(map, len);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004804
4805 trace_insert_enum_map_file(mod, start, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04004806}
4807
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004808static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004809tracing_set_trace_read(struct file *filp, char __user *ubuf,
4810 size_t cnt, loff_t *ppos)
4811{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004812 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004813 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004814 int r;
4815
4816 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004817 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004818 mutex_unlock(&trace_types_lock);
4819
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004820 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004821}
4822
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004823int tracer_init(struct tracer *t, struct trace_array *tr)
4824{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004825 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004826 return t->init(tr);
4827}
4828
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004829static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004830{
4831 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05004832
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004833 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004834 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004835}
4836
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004837#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09004838/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004839static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4840 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09004841{
4842 int cpu, ret = 0;
4843
4844 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4845 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004846 ret = ring_buffer_resize(trace_buf->buffer,
4847 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004848 if (ret < 0)
4849 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004850 per_cpu_ptr(trace_buf->data, cpu)->entries =
4851 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09004852 }
4853 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004854 ret = ring_buffer_resize(trace_buf->buffer,
4855 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004856 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004857 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4858 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09004859 }
4860
4861 return ret;
4862}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004863#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09004864
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004865static int __tracing_resize_ring_buffer(struct trace_array *tr,
4866 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04004867{
4868 int ret;
4869
4870 /*
4871 * If the kernel or the user changes the size of the ring buffer,
Steven Rostedta123c522009-03-12 11:21:08 -04004872 * we use the size that was given, and we can forget about
4873 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04004874 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05004875 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04004876
Steven Rostedtb382ede62012-10-10 21:44:34 -04004877 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004878 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04004879 return 0;
4880
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004881 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004882 if (ret < 0)
4883 return ret;
4884
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004885#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004886 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4887 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004888 goto out;
4889
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004890 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004891 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004892 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4893 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004894 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04004895 /*
4896 * AARGH! We are left with different
4897 * size max buffer!!!!
4898 * The max buffer is our "snapshot" buffer.
4899 * When a tracer needs a snapshot (one of the
4900 * latency tracers), it swaps the max buffer
4901 * with the saved snapshot. We succeeded in updating
4902 * the size of the main buffer, but failed to
4903 * update the size of the max buffer. But when we tried
4904 * to reset the main buffer to the original size, we
4905 * failed there too. This is very unlikely to
4906 * happen, but if it does, warn and kill all
4907 * tracing.
4908 */
Steven Rostedt73c51622009-03-11 13:42:01 -04004909 WARN_ON(1);
4910 tracing_disabled = 1;
4911 }
4912 return ret;
4913 }
4914
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004915 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004916 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004917 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004918 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004919
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004920 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004921#endif /* CONFIG_TRACER_MAX_TRACE */
4922
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004923 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004924 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004925 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004926 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04004927
4928 return ret;
4929}
4930
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004931static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4932 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004933{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07004934 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004935
4936 mutex_lock(&trace_types_lock);
4937
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004938 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4939 /* make sure, this cpu is enabled in the mask */
4940 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4941 ret = -EINVAL;
4942 goto out;
4943 }
4944 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004945
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004946 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004947 if (ret < 0)
4948 ret = -ENOMEM;
4949
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004950out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004951 mutex_unlock(&trace_types_lock);
4952
4953 return ret;
4954}
4955
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004956
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004957/**
4958 * tracing_update_buffers - used by tracing facility to expand ring buffers
4959 *
4960 * To save memory when tracing is never used on a system that has it
4961 * configured in, the ring buffers are set to a minimum size. Once
4962 * a user starts to use the tracing facility, they need to grow
4963 * to their default size.
4964 *
4965 * This function is to be called when a tracer is about to be used.
4966 */
4967int tracing_update_buffers(void)
4968{
4969 int ret = 0;
4970
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004971 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004972 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004973 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004974 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004975 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004976
4977 return ret;
4978}
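/*
 * Editor's sketch (illustrative, not from this file): the buffers stay at
 * their boot-time minimum until tracing_update_buffers() or a resize runs.
 * From user space, simply selecting a tracer is enough to trigger the
 * expansion, because tracing_set_tracer() below resizes first:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/debug/tracing/current_tracer",
 *			      O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		// any tracer name works; even "nop" expands the buffers
 *		write(fd, "nop", 3);
 *		return close(fd);
 *	}
 */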
4979
Steven Rostedt577b7852009-02-26 23:43:05 -05004980struct trace_option_dentry;
4981
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04004982static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004983create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004984
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004985/*
4986 * Used to clear out the tracer before deletion of an instance.
4987 * Must have trace_types_lock held.
4988 */
4989static void tracing_set_nop(struct trace_array *tr)
4990{
4991 if (tr->current_trace == &nop_trace)
4992 return;
4993
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004994 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004995
4996 if (tr->current_trace->reset)
4997 tr->current_trace->reset(tr);
4998
4999 tr->current_trace = &nop_trace;
5000}
5001
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04005002static void add_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005003{
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005004 /* Only enable if the directory has been created already. */
5005 if (!tr->dir)
5006 return;
5007
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04005008 create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005009}
5010
5011static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5012{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005013 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005014#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005015 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005016#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005017 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005018
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005019 mutex_lock(&trace_types_lock);
5020
Steven Rostedt73c51622009-03-11 13:42:01 -04005021 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005022 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005023 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04005024 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01005025 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04005026 ret = 0;
5027 }
5028
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005029 for (t = trace_types; t; t = t->next) {
5030 if (strcmp(t->name, buf) == 0)
5031 break;
5032 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005033 if (!t) {
5034 ret = -EINVAL;
5035 goto out;
5036 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005037 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005038 goto out;
5039
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005040 /* Some tracers are only allowed for the top-level buffer */
5041 if (!trace_ok_for_array(t, tr)) {
5042 ret = -EINVAL;
5043 goto out;
5044 }
5045
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005046 /* If trace pipe files are being read, we can't change the tracer */
5047 if (tr->current_trace->ref) {
5048 ret = -EBUSY;
5049 goto out;
5050 }
5051
Steven Rostedt9f029e82008-11-12 15:24:24 -05005052 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005053
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005054 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005055
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005056 if (tr->current_trace->reset)
5057 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05005058
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005059 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005060 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05005061
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005062#ifdef CONFIG_TRACER_MAX_TRACE
5063 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05005064
5065 if (had_max_tr && !t->use_max_tr) {
5066 /*
5067 * We need to make sure that the update_max_tr sees that
5068 * current_trace changed to nop_trace to keep it from
5069 * swapping the buffers after we resize it.
5070 * The update_max_tr is called with interrupts disabled,
5071 * so a synchronize_sched() is sufficient.
5072 */
5073 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005074 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005075 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005076#endif
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005077
5078#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005079 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005080 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005081 if (ret < 0)
5082 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005083 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005084#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05005085
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005086 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005087 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005088 if (ret)
5089 goto out;
5090 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005091
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005092 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005093 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05005094 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005095 out:
5096 mutex_unlock(&trace_types_lock);
5097
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005098 return ret;
5099}
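/*
 * Usage sketch (illustrative; "nop" is the built-in no-op tracer that
 * is always registered): within this file the switch can be done
 * programmatically, mirroring a userspace write to current_tracer:
 *
 *	ret = tracing_set_tracer(tr, "nop");
 *
 * The userspace equivalent is: echo nop > tracing/current_tracer
 */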
5100
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005101static ssize_t
5102tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5103 size_t cnt, loff_t *ppos)
5104{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005105 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08005106 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005107 int i;
5108 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005109 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005110
Steven Rostedt60063a62008-10-28 10:44:24 -04005111 ret = cnt;
5112
Li Zefanee6c2c12009-09-18 14:06:47 +08005113 if (cnt > MAX_TRACER_SIZE)
5114 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005115
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08005116 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005117 return -EFAULT;
5118
5119 buf[cnt] = 0;
5120
5121 /* strip trailing whitespace */
5122 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5123 buf[i] = 0;
5124
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005125 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005126 if (err)
5127 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005128
Jiri Olsacf8517c2009-10-23 19:36:16 -04005129 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005130
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005131 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005132}
5133
5134static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005135tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5136 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005137{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005138 char buf[64];
5139 int r;
5140
Steven Rostedtcffae432008-05-12 21:21:00 +02005141 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005142 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02005143 if (r > sizeof(buf))
5144 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005145 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005146}
5147
5148static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005149tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5150 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005151{
Hannes Eder5e398412009-02-10 19:44:34 +01005152 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005153 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005154
Peter Huewe22fe9b52011-06-07 21:58:27 +02005155 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5156 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005157 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005158
5159 *ptr = val * 1000;
5160
5161 return cnt;
5162}
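/*
 * Worked example: these files are presented in microseconds but stored
 * in nanoseconds. Writing the string "100" parses to val = 100 and
 * stores *ptr = 100 * 1000 = 100000 ns; reading back converts with
 * nsecs_to_usecs() and prints "100" again.
 */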
5163
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005164static ssize_t
5165tracing_thresh_read(struct file *filp, char __user *ubuf,
5166 size_t cnt, loff_t *ppos)
5167{
5168 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5169}
5170
5171static ssize_t
5172tracing_thresh_write(struct file *filp, const char __user *ubuf,
5173 size_t cnt, loff_t *ppos)
5174{
5175 struct trace_array *tr = filp->private_data;
5176 int ret;
5177
5178 mutex_lock(&trace_types_lock);
5179 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5180 if (ret < 0)
5181 goto out;
5182
5183 if (tr->current_trace->update_thresh) {
5184 ret = tr->current_trace->update_thresh(tr);
5185 if (ret < 0)
5186 goto out;
5187 }
5188
5189 ret = cnt;
5190out:
5191 mutex_unlock(&trace_types_lock);
5192
5193 return ret;
5194}
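/*
 * Usage sketch (behavior depends on the current tracer honoring
 * update_thresh): writing "50" stores 50000 ns, so for tracers with a
 * threshold mode only latencies above 50 usecs are recorded:
 * echo 50 > tracing/tracing_thresh
 */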
5195
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04005196#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Chen Gange428abb2015-11-10 05:15:15 +08005197
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005198static ssize_t
5199tracing_max_lat_read(struct file *filp, char __user *ubuf,
5200 size_t cnt, loff_t *ppos)
5201{
5202 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5203}
5204
5205static ssize_t
5206tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5207 size_t cnt, loff_t *ppos)
5208{
5209 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5210}
5211
Chen Gange428abb2015-11-10 05:15:15 +08005212#endif
5213
Steven Rostedtb3806b42008-05-12 21:20:46 +02005214static int tracing_open_pipe(struct inode *inode, struct file *filp)
5215{
Oleg Nesterov15544202013-07-23 17:25:57 +02005216 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005217 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005218 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005219
5220 if (tracing_disabled)
5221 return -ENODEV;
5222
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005223 if (trace_array_get(tr) < 0)
5224 return -ENODEV;
5225
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005226 mutex_lock(&trace_types_lock);
5227
Steven Rostedtb3806b42008-05-12 21:20:46 +02005228 /* create a buffer to store the information to pass to userspace */
5229 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005230 if (!iter) {
5231 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005232 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005233 goto out;
5234 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005235
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04005236 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005237 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005238
5239 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5240 ret = -ENOMEM;
5241 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10305242 }
5243
Steven Rostedta3097202008-11-07 22:36:02 -05005244 /* trace pipe does not show the start of the buffer */
Rusty Russell44623442009-01-01 10:12:23 +10305245 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05005246
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005247 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04005248 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5249
David Sharp8be07092012-11-13 12:18:22 -08005250 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005251 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08005252 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5253
Oleg Nesterov15544202013-07-23 17:25:57 +02005254 iter->tr = tr;
5255 iter->trace_buffer = &tr->trace_buffer;
5256 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005257 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005258 filp->private_data = iter;
5259
Steven Rostedt107bad82008-05-12 21:21:01 +02005260 if (iter->trace->pipe_open)
5261 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02005262
Arnd Bergmannb4447862010-07-07 23:40:11 +02005263 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005264
5265 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005266out:
5267 mutex_unlock(&trace_types_lock);
5268 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005269
5270fail:
5271 /* iter->trace points to the live tracer and was not allocated here */
5272 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005273 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005274 mutex_unlock(&trace_types_lock);
5275 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005276}
5277
5278static int tracing_release_pipe(struct inode *inode, struct file *file)
5279{
5280 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02005281 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005282
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005283 mutex_lock(&trace_types_lock);
5284
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005285 tr->current_trace->ref--;
5286
Steven Rostedt29bf4a52009-12-09 12:37:43 -05005287 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05005288 iter->trace->pipe_close(iter);
5289
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005290 mutex_unlock(&trace_types_lock);
5291
Rusty Russell44623442009-01-01 10:12:23 +10305292 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005293 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005294 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005295
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005296 trace_array_put(tr);
5297
Steven Rostedtb3806b42008-05-12 21:20:46 +02005298 return 0;
5299}
5300
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005301static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005302trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005303{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005304 struct trace_array *tr = iter->tr;
5305
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005306 /* Iterators are static; they should be filled or empty */
5307 if (trace_buffer_iter(iter, iter->cpu_file))
5308 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005309
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005310 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005311 /*
5312 * Always select as readable when in blocking mode
5313 */
5314 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005315 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005316 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005317 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005318}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005319
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005320static unsigned int
5321tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5322{
5323 struct trace_iterator *iter = filp->private_data;
5324
5325 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005326}
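/*
 * Userspace sketch (illustrative; assumes tracefs is mounted and fd is
 * an open trace_pipe descriptor): the poll support above lets a reader
 * sleep until data arrives instead of spinning:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *	if (pfd.revents & POLLIN)
 *		n = read(fd, buf, sizeof(buf));
 */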
5327
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005328/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005329static int tracing_wait_pipe(struct file *filp)
5330{
5331 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005332 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005333
5334 while (trace_empty(iter)) {
5335
5336 if ((filp->f_flags & O_NONBLOCK)) {
5337 return -EAGAIN;
5338 }
5339
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005340 /*
Liu Bo250bfd32013-01-14 10:54:11 +08005341 * We only give an EOF when tracing is disabled and we have
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005342 * already read something. We still block if tracing is disabled
5343 * but we have never read anything. This allows a user to cat this
5344 * file, and then enable tracing. But after we have read something,
5345 * we give an EOF when tracing is again disabled.
5346 *
5347 * iter->pos will be 0 if we haven't read anything.
5348 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04005349 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005350 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04005351
5352 mutex_unlock(&iter->mutex);
5353
Rabin Vincente30f53a2014-11-10 19:46:34 +01005354 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04005355
5356 mutex_lock(&iter->mutex);
5357
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005358 if (ret)
5359 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005360 }
5361
5362 return 1;
5363}
5364
Steven Rostedtb3806b42008-05-12 21:20:46 +02005365/*
5366 * Consumer reader.
5367 */
5368static ssize_t
5369tracing_read_pipe(struct file *filp, char __user *ubuf,
5370 size_t cnt, loff_t *ppos)
5371{
5372 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005373 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005374
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005375 /*
5376 * Avoid more than one consumer on a single file descriptor.
5377 * This is just a matter of trace coherency; the ring buffer itself
5378 * is protected.
5379 */
5380 mutex_lock(&iter->mutex);
Steven Rostedt (Red Hat)12458002016-09-23 22:57:13 -04005381
5382 /* return any leftover data */
5383 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5384 if (sret != -EBUSY)
5385 goto out;
5386
5387 trace_seq_init(&iter->seq);
5388
Steven Rostedt107bad82008-05-12 21:21:01 +02005389 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005390 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5391 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02005392 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02005393 }
5394
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005395waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005396 sret = tracing_wait_pipe(filp);
5397 if (sret <= 0)
5398 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005399
5400 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005401 if (trace_empty(iter)) {
5402 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02005403 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005404 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005405
5406 if (cnt >= PAGE_SIZE)
5407 cnt = PAGE_SIZE - 1;
5408
Steven Rostedt53d0aa72008-05-12 21:21:01 +02005409 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02005410 memset(&iter->seq, 0,
5411 sizeof(struct trace_iterator) -
5412 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04005413 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02005414 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005415
Lai Jiangshan4f535962009-05-18 19:35:34 +08005416 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005417 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05005418 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02005419 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005420 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02005421
Ingo Molnarf9896bf2008-05-12 21:20:47 +02005422 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02005423 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02005424 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005425 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005426 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02005427 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01005428 if (ret != TRACE_TYPE_NO_CONSUME)
5429 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005430
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005431 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02005432 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01005433
5434 /*
5435 * Setting the full flag means we reached the trace_seq buffer
5436 * size and we should have left by the partial output condition above.
5437 * One of the trace_seq_* functions is not being used properly.
5438 */
5439 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5440 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005441 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005442 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005443 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02005444
Steven Rostedtb3806b42008-05-12 21:20:46 +02005445 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005446 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005447 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05005448 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005449
5450 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005451 * If there was nothing to send to the user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005452 * entries, go back to wait for more entries.
5453 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005454 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005455 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005456
Steven Rostedt107bad82008-05-12 21:21:01 +02005457out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005458 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02005459
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005460 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005461}
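/*
 * Userspace sketch of the consuming reader above (illustrative, error
 * handling trimmed): each read destroys what it returns, and with
 * O_NONBLOCK a drained buffer yields -EAGAIN instead of blocking:
 *
 *	int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *	ssize_t n;
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		fwrite(buf, 1, n, stdout);
 */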
5462
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005463static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5464 unsigned int idx)
5465{
5466 __free_page(spd->pages[idx]);
5467}
5468
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005469static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05005470 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005471 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05005472 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005473 .steal = generic_pipe_buf_steal,
5474 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005475};
5476
Steven Rostedt34cd4992009-02-09 12:06:29 -05005477static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01005478tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005479{
5480 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005481 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005482 int ret;
5483
5484 /* Seq buffer is page-sized, exactly what we need. */
5485 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005486 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005487 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005488
5489 if (trace_seq_has_overflowed(&iter->seq)) {
5490 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005491 break;
5492 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005493
5494 /*
5495 * This should not be hit, because it should only
5496 * be set if the iter->seq overflowed. But check it
5497 * anyway to be safe.
5498 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05005499 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005500 iter->seq.seq.len = save_len;
5501 break;
5502 }
5503
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005504 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005505 if (rem < count) {
5506 rem = 0;
5507 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005508 break;
5509 }
5510
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08005511 if (ret != TRACE_TYPE_NO_CONSUME)
5512 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05005513 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05005514 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05005515 rem = 0;
5516 iter->ent = NULL;
5517 break;
5518 }
5519 }
5520
5521 return rem;
5522}
5523
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005524static ssize_t tracing_splice_read_pipe(struct file *filp,
5525 loff_t *ppos,
5526 struct pipe_inode_info *pipe,
5527 size_t len,
5528 unsigned int flags)
5529{
Jens Axboe35f3d142010-05-20 10:43:18 +02005530 struct page *pages_def[PIPE_DEF_BUFFERS];
5531 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005532 struct trace_iterator *iter = filp->private_data;
5533 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005534 .pages = pages_def,
5535 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005536 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02005537 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005538 .flags = flags,
5539 .ops = &tracing_pipe_buf_ops,
5540 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005541 };
5542 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005543 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005544 unsigned int i;
5545
Jens Axboe35f3d142010-05-20 10:43:18 +02005546 if (splice_grow_spd(pipe, &spd))
5547 return -ENOMEM;
5548
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005549 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005550
5551 if (iter->trace->splice_read) {
5552 ret = iter->trace->splice_read(iter, filp,
5553 ppos, pipe, len, flags);
5554 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005555 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005556 }
5557
5558 ret = tracing_wait_pipe(filp);
5559 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005560 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005561
Jason Wessel955b61e2010-08-05 09:22:23 -05005562 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005563 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005564 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005565 }
5566
Lai Jiangshan4f535962009-05-18 19:35:34 +08005567 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005568 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005569
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005570 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04005571 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005572 spd.pages[i] = alloc_page(GFP_KERNEL);
5573 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05005574 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005575
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01005576 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005577
5578 /* Copy the data into the page, so we can start over. */
5579 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02005580 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005581 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005582 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005583 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005584 break;
5585 }
Jens Axboe35f3d142010-05-20 10:43:18 +02005586 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005587 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005588
Steven Rostedtf9520752009-03-02 14:04:40 -05005589 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005590 }
5591
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005592 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005593 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005594 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005595
5596 spd.nr_pages = i;
5597
Steven Rostedt (Red Hat)a29054d92016-03-18 15:46:48 -04005598 if (i)
5599 ret = splice_to_pipe(pipe, &spd);
5600 else
5601 ret = 0;
Jens Axboe35f3d142010-05-20 10:43:18 +02005602out:
Eric Dumazet047fe362012-06-12 15:24:40 +02005603 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02005604 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005605
Steven Rostedt34cd4992009-02-09 12:06:29 -05005606out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005607 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02005608 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005609}
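/*
 * Userspace sketch (illustrative): because trace_pipe implements
 * splice_read, its contents can be routed through a pipe to another
 * file descriptor without an extra copy through userspace:
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	splice(trace_fd, NULL, pfd[1], NULL, 65536, 0);
 *	splice(pfd[0], NULL, out_fd, NULL, 65536, 0);
 */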
5610
Steven Rostedta98a3c32008-05-12 21:20:59 +02005611static ssize_t
5612tracing_entries_read(struct file *filp, char __user *ubuf,
5613 size_t cnt, loff_t *ppos)
5614{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005615 struct inode *inode = file_inode(filp);
5616 struct trace_array *tr = inode->i_private;
5617 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005618 char buf[64];
5619 int r = 0;
5620 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005621
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005622 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005623
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005624 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005625 int cpu, buf_size_same;
5626 unsigned long size;
5627
5628 size = 0;
5629 buf_size_same = 1;
5630 /* check if all cpu sizes are the same */
5631 for_each_tracing_cpu(cpu) {
5632 /* fill in the size from the first enabled cpu */
5633 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005634 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5635 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005636 buf_size_same = 0;
5637 break;
5638 }
5639 }
5640
5641 if (buf_size_same) {
5642 if (!ring_buffer_expanded)
5643 r = sprintf(buf, "%lu (expanded: %lu)\n",
5644 size >> 10,
5645 trace_buf_size >> 10);
5646 else
5647 r = sprintf(buf, "%lu\n", size >> 10);
5648 } else
5649 r = sprintf(buf, "X\n");
5650 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005651 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005652
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005653 mutex_unlock(&trace_types_lock);
5654
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005655 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5656 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005657}
5658
5659static ssize_t
5660tracing_entries_write(struct file *filp, const char __user *ubuf,
5661 size_t cnt, loff_t *ppos)
5662{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005663 struct inode *inode = file_inode(filp);
5664 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005665 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005666 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005667
Peter Huewe22fe9b52011-06-07 21:58:27 +02005668 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5669 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005670 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005671
5672 /* must have at least 1 entry */
5673 if (!val)
5674 return -EINVAL;
5675
Steven Rostedt1696b2b2008-11-13 00:09:35 -05005676 /* value is in KB */
5677 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005678 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005679 if (ret < 0)
5680 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005681
Jiri Olsacf8517c2009-10-23 19:36:16 -04005682 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005683
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005684 return cnt;
5685}
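/*
 * Worked example: the value written to buffer_size_kb is in kilobytes,
 * so "echo 1024 > tracing/buffer_size_kb" parses to val = 1024 and is
 * shifted (val <<= 10) to 1048576 bytes before the resize. Writing the
 * per_cpu/cpuN/buffer_size_kb file resizes only that cpu's buffer.
 */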
Steven Rostedtbf5e6512008-11-10 21:46:00 -05005686
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005687static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005688tracing_total_entries_read(struct file *filp, char __user *ubuf,
5689 size_t cnt, loff_t *ppos)
5690{
5691 struct trace_array *tr = filp->private_data;
5692 char buf[64];
5693 int r, cpu;
5694 unsigned long size = 0, expanded_size = 0;
5695
5696 mutex_lock(&trace_types_lock);
5697 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005698 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005699 if (!ring_buffer_expanded)
5700 expanded_size += trace_buf_size >> 10;
5701 }
5702 if (ring_buffer_expanded)
5703 r = sprintf(buf, "%lu\n", size);
5704 else
5705 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5706 mutex_unlock(&trace_types_lock);
5707
5708 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5709}
5710
5711static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005712tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5713 size_t cnt, loff_t *ppos)
5714{
5715 /*
5716 * There is no need to read what the user has written; this function
5717 * is just to make sure that there is no error when "echo" is used
5718 */
5719
5720 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005721
5722 return cnt;
5723}
5724
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005725static int
5726tracing_free_buffer_release(struct inode *inode, struct file *filp)
5727{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005728 struct trace_array *tr = inode->i_private;
5729
Steven Rostedtcf30cf62011-06-14 22:44:07 -04005730 /* disable tracing? */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005731 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07005732 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005733 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005734 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005735
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005736 trace_array_put(tr);
5737
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005738 return 0;
5739}
5740
Steven Rostedtfa32e852016-07-06 15:25:08 -04005741static inline int lock_user_pages(const char __user *ubuf, size_t cnt,
5742 struct page **pages, void **map_page,
5743 int *offset)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005744{
Steven Rostedtd696b582011-09-22 11:50:27 -04005745 unsigned long addr = (unsigned long)ubuf;
Steven Rostedtd696b582011-09-22 11:50:27 -04005746 int nr_pages = 1;
Steven Rostedtd696b582011-09-22 11:50:27 -04005747 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005748 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005749
Steven Rostedtd696b582011-09-22 11:50:27 -04005750 /*
5751 * Userspace is injecting traces into the kernel trace buffer.
5752 * We want to be as non intrusive as possible.
5753 * To do so, we do not want to allocate any special buffers
5754 * or take any locks, but instead write the userspace data
5755 * straight into the ring buffer.
5756 *
5757 * First we need to pin the userspace buffer into memory,
5758 * which it most likely already is, because userspace just referenced it.
5759 * But there's no guarantee that it is. By using get_user_pages_fast()
5760 * and kmap_atomic/kunmap_atomic() we can get access to the
5761 * pages directly. We then write the data directly into the
5762 * ring buffer.
5763 */
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005764
Steven Rostedtd696b582011-09-22 11:50:27 -04005765 /* check if we cross a page boundary */
5766 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5767 nr_pages = 2;
5768
Steven Rostedtfa32e852016-07-06 15:25:08 -04005769 *offset = addr & (PAGE_SIZE - 1);
Steven Rostedtd696b582011-09-22 11:50:27 -04005770 addr &= PAGE_MASK;
5771
5772 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5773 if (ret < nr_pages) {
5774 while (--ret >= 0)
5775 put_page(pages[ret]);
Steven Rostedtfa32e852016-07-06 15:25:08 -04005776 return -EFAULT;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005777 }
5778
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005779 for (i = 0; i < nr_pages; i++)
5780 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04005781
Steven Rostedtfa32e852016-07-06 15:25:08 -04005782 return nr_pages;
5783}
5784
5785static inline void unlock_user_pages(struct page **pages,
5786 void **map_page, int nr_pages)
5787{
5788 int i;
5789
5790 for (i = nr_pages - 1; i >= 0; i--) {
5791 kunmap_atomic(map_page[i]);
5792 put_page(pages[i]);
5793 }
5794}
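/*
 * Worked example for the page math above: with PAGE_SIZE = 4096, a
 * write of cnt = 100 bytes starting at an address whose page offset is
 * 0xff0 (4080) crosses a page boundary, so nr_pages = 2. Then
 * offset = 4080, the first PAGE_SIZE - offset = 16 bytes come from
 * map_page[0] + offset and the remaining 84 from map_page[1].
 */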
5795
5796static ssize_t
5797tracing_mark_write(struct file *filp, const char __user *ubuf,
5798 size_t cnt, loff_t *fpos)
5799{
5800 struct trace_array *tr = filp->private_data;
5801 struct ring_buffer_event *event;
5802 struct ring_buffer *buffer;
5803 struct print_entry *entry;
5804 unsigned long irq_flags;
5805 struct page *pages[2];
5806 void *map_page[2];
5807 int nr_pages = 1;
5808 ssize_t written;
5809 int offset;
5810 int size;
5811 int len;
5812
5813 if (tracing_disabled)
5814 return -EINVAL;
5815
5816 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5817 return -EINVAL;
5818
5819 if (cnt > TRACE_BUF_SIZE)
5820 cnt = TRACE_BUF_SIZE;
5821
5822 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5823
5824 nr_pages = lock_user_pages(ubuf, cnt, pages, map_page, &offset);
5825 if (nr_pages < 0)
5826 return nr_pages;
5827
Steven Rostedtd696b582011-09-22 11:50:27 -04005828 local_save_flags(irq_flags);
5829 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07005830 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05005831 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5832 irq_flags, preempt_count());
Steven Rostedtd696b582011-09-22 11:50:27 -04005833 if (!event) {
5834 /* Ring buffer disabled, return as if not open for write */
5835 written = -EBADF;
5836 goto out_unlock;
5837 }
5838
5839 entry = ring_buffer_event_data(event);
5840 entry->ip = _THIS_IP_;
5841
5842 if (nr_pages == 2) {
5843 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005844 memcpy(&entry->buf, map_page[0] + offset, len);
5845 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04005846 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005847 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04005848
5849 if (entry->buf[cnt - 1] != '\n') {
5850 entry->buf[cnt] = '\n';
5851 entry->buf[cnt + 1] = '\0';
5852 } else
5853 entry->buf[cnt] = '\0';
5854
Steven Rostedt7ffbd482012-10-11 12:14:25 -04005855 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04005856
5857 written = cnt;
5858
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02005859 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005860
Steven Rostedtd696b582011-09-22 11:50:27 -04005861 out_unlock:
Steven Rostedtfa32e852016-07-06 15:25:08 -04005862 unlock_user_pages(pages, map_page, nr_pages);
5863
5864 return written;
5865}
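/*
 * Userspace sketch (illustrative): each write(2) to trace_marker
 * becomes a single TRACE_PRINT event; a trailing newline is appended
 * if the caller did not supply one:
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *	write(fd, "hello from userspace", 20);
 */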
5866
5867/* Limit it for now to 3K (including tag) */
5868#define RAW_DATA_MAX_SIZE (1024*3)
5869
5870static ssize_t
5871tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
5872 size_t cnt, loff_t *fpos)
5873{
5874 struct trace_array *tr = filp->private_data;
5875 struct ring_buffer_event *event;
5876 struct ring_buffer *buffer;
5877 struct raw_data_entry *entry;
5878 unsigned long irq_flags;
5879 struct page *pages[2];
5880 void *map_page[2];
5881 int nr_pages = 1;
5882 ssize_t written;
5883 int offset;
5884 int size;
5885 int len;
5886
5887 if (tracing_disabled)
5888 return -EINVAL;
5889
5890 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5891 return -EINVAL;
5892
5893 /* The marker must at least have a tag id */
5894 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
5895 return -EINVAL;
5896
5897 if (cnt > TRACE_BUF_SIZE)
5898 cnt = TRACE_BUF_SIZE;
5899
5900 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5901
5902 nr_pages = lock_user_pages(ubuf, cnt, pages, map_page, &offset);
5903 if (nr_pages < 0)
5904 return nr_pages;
5905
5906 local_save_flags(irq_flags);
5907 size = sizeof(*entry) + cnt;
5908 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05005909 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
5910 irq_flags, preempt_count());
Steven Rostedtfa32e852016-07-06 15:25:08 -04005911 if (!event) {
5912 /* Ring buffer disabled, return as if not open for write */
5913 written = -EBADF;
5914 goto out_unlock;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005915 }
Steven Rostedtfa32e852016-07-06 15:25:08 -04005916
5917 entry = ring_buffer_event_data(event);
5918
5919 if (nr_pages == 2) {
5920 len = PAGE_SIZE - offset;
5921 memcpy(&entry->id, map_page[0] + offset, len);
5922 memcpy(((char *)&entry->id) + len, map_page[1], cnt - len);
5923 } else
5924 memcpy(&entry->id, map_page[0] + offset, cnt);
5925
5926 __buffer_unlock_commit(buffer, event);
5927
5928 written = cnt;
5929
5930 *fpos += written;
5931
5932 out_unlock:
5933 unlock_user_pages(pages, map_page, nr_pages);
5934
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02005935 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005936}
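/*
 * Userspace sketch (illustrative; the payload layout is an assumption
 * of the example, only the leading id is required): trace_marker_raw
 * expects binary data starting with a 32-bit tag id, so the smallest
 * valid write is sizeof(unsigned int) bytes:
 *
 *	struct { unsigned int id; int payload; } rec = { 1, 42 };
 *	write(raw_fd, &rec, sizeof(rec));
 */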
5937
Li Zefan13f16d22009-12-08 11:16:11 +08005938static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08005939{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005940 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08005941 int i;
5942
5943 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08005944 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08005945 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005946 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5947 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08005948 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08005949
Li Zefan13f16d22009-12-08 11:16:11 +08005950 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08005951}
5952
Steven Rostedte1e232c2014-02-10 23:38:46 -05005953static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08005954{
Zhaolei5079f322009-08-25 16:12:56 +08005955 int i;
5956
Zhaolei5079f322009-08-25 16:12:56 +08005957 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5958 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5959 break;
5960 }
5961 if (i == ARRAY_SIZE(trace_clocks))
5962 return -EINVAL;
5963
Zhaolei5079f322009-08-25 16:12:56 +08005964 mutex_lock(&trace_types_lock);
5965
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005966 tr->clock_id = i;
5967
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005968 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08005969
David Sharp60303ed2012-10-11 16:27:52 -07005970 /*
5971 * The new clock may not be consistent with the previous clock.
5972 * Reset the buffer so that it doesn't have incomparable timestamps.
5973 */
Alexander Z Lam94571582013-08-02 18:36:16 -07005974 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005975
5976#ifdef CONFIG_TRACER_MAX_TRACE
5977 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5978 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07005979 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005980#endif
David Sharp60303ed2012-10-11 16:27:52 -07005981
Zhaolei5079f322009-08-25 16:12:56 +08005982 mutex_unlock(&trace_types_lock);
5983
Steven Rostedte1e232c2014-02-10 23:38:46 -05005984 return 0;
5985}
5986
5987static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5988 size_t cnt, loff_t *fpos)
5989{
5990 struct seq_file *m = filp->private_data;
5991 struct trace_array *tr = m->private;
5992 char buf[64];
5993 const char *clockstr;
5994 int ret;
5995
5996 if (cnt >= sizeof(buf))
5997 return -EINVAL;
5998
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08005999 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05006000 return -EFAULT;
6001
6002 buf[cnt] = 0;
6003
6004 clockstr = strstrip(buf);
6005
6006 ret = tracing_set_clock(tr, clockstr);
6007 if (ret)
6008 return ret;
6009
Zhaolei5079f322009-08-25 16:12:56 +08006010 *fpos += cnt;
6011
6012 return cnt;
6013}
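/*
 * Usage sketch: reading trace_clock lists the available clocks with
 * the active one in brackets, e.g. "[local] global counter ...".
 * Writing a name switches clocks and resets the buffers, since
 * timestamps from different clocks are not comparable:
 * echo global > tracing/trace_clock
 */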
6014
Li Zefan13f16d22009-12-08 11:16:11 +08006015static int tracing_clock_open(struct inode *inode, struct file *file)
6016{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006017 struct trace_array *tr = inode->i_private;
6018 int ret;
6019
Li Zefan13f16d22009-12-08 11:16:11 +08006020 if (tracing_disabled)
6021 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006022
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006023 if (trace_array_get(tr))
6024 return -ENODEV;
6025
6026 ret = single_open(file, tracing_clock_show, inode->i_private);
6027 if (ret < 0)
6028 trace_array_put(tr);
6029
6030 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08006031}
6032
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006033struct ftrace_buffer_info {
6034 struct trace_iterator iter;
6035 void *spare;
6036 unsigned int read;
6037};
6038
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006039#ifdef CONFIG_TRACER_SNAPSHOT
6040static int tracing_snapshot_open(struct inode *inode, struct file *file)
6041{
Oleg Nesterov6484c712013-07-23 17:26:10 +02006042 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006043 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006044 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006045 int ret = 0;
6046
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006047 if (trace_array_get(tr) < 0)
6048 return -ENODEV;
6049
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006050 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02006051 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006052 if (IS_ERR(iter))
6053 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006054 } else {
6055 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006056 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006057 m = kzalloc(sizeof(*m), GFP_KERNEL);
6058 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006059 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006060 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6061 if (!iter) {
6062 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006063 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006064 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006065 ret = 0;
6066
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006067 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02006068 iter->trace_buffer = &tr->max_buffer;
6069 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006070 m->private = iter;
6071 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006072 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006073out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006074 if (ret < 0)
6075 trace_array_put(tr);
6076
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006077 return ret;
6078}
6079
6080static ssize_t
6081tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6082 loff_t *ppos)
6083{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006084 struct seq_file *m = filp->private_data;
6085 struct trace_iterator *iter = m->private;
6086 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006087 unsigned long val;
6088 int ret;
6089
6090 ret = tracing_update_buffers();
6091 if (ret < 0)
6092 return ret;
6093
6094 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6095 if (ret)
6096 return ret;
6097
6098 mutex_lock(&trace_types_lock);
6099
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006100 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006101 ret = -EBUSY;
6102 goto out;
6103 }
6104
6105 switch (val) {
6106 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006107 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6108 ret = -EINVAL;
6109 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006110 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04006111 if (tr->allocated_snapshot)
6112 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006113 break;
6114 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006115/* Only allow per-cpu swap if the ring buffer supports it */
6116#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6117 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6118 ret = -EINVAL;
6119 break;
6120 }
6121#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05006122 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04006123 ret = alloc_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006124 if (ret < 0)
6125 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006126 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006127 local_irq_disable();
6128 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006129 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006130 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006131 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006132 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006133 local_irq_enable();
6134 break;
6135 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05006136 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006137 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6138 tracing_reset_online_cpus(&tr->max_buffer);
6139 else
6140 tracing_reset(&tr->max_buffer, iter->cpu_file);
6141 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006142 break;
6143 }
6144
6145 if (ret >= 0) {
6146 *ppos += cnt;
6147 ret = cnt;
6148 }
6149out:
6150 mutex_unlock(&trace_types_lock);
6151 return ret;
6152}
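/*
 * Usage sketch for the switch above (top-level snapshot file; per-cpu
 * snapshot files reject some of these): "echo 0" frees the snapshot
 * buffer, "echo 1" allocates it if needed and swaps it with the live
 * buffer, and any other value (e.g. "echo 2") clears the snapshot
 * without freeing it.
 */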
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006153
6154static int tracing_snapshot_release(struct inode *inode, struct file *file)
6155{
6156 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006157 int ret;
6158
6159 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006160
6161 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006162 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006163
6164 /* If write only, the seq_file is just a stub */
6165 if (m)
6166 kfree(m->private);
6167 kfree(m);
6168
6169 return 0;
6170}
6171
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006172static int tracing_buffers_open(struct inode *inode, struct file *filp);
6173static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6174 size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
					   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */


static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};
#endif

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_mark_raw_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_raw_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

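/*
 * tracing_buffers_open() backs the per_cpu/cpuX/trace_pipe_raw files.
 * It pins the trace_array (trace_array_get()) and bumps the current
 * tracer's ref count so neither can be torn down while the raw buffer
 * file is open; tracing_buffers_release() drops both again.
 */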
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr		= tr;
	info->iter.cpu_file	= tracing_get_cpu(inode);
	info->iter.trace	= tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare		= NULL;
	/* Force reading ring buffer for first read */
	info->read		= (unsigned int)-1;

	filp->private_data = info;

	tr->current_trace->ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	if (!info->spare)
		return -ENOMEM;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			ret = wait_on_pipe(iter, false);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;

	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->current_trace->ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

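/*
 * A buffer_ref is a refcounted handle to one ring-buffer read page
 * that is handed to the pipe during a splice. Each pipe buffer that
 * points at the page holds one reference; the page goes back to the
 * ring buffer only when the last reference drops (see
 * buffer_pipe_buf_release() and buffer_spd_release() below).
 */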
struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(): release any pages left in the spd
 * in case we errored out while filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			ret = -ENOMEM;
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}
		/*
		 * Zero out any leftover data; this page is headed to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (ret)
			goto out;

		ret = -EAGAIN;
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			goto out;

		ret = wait_on_pipe(iter, true);
		if (ret)
			goto out;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);

	return ret;
}

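/*
 * A minimal user-space sketch of how trace_pipe_raw is meant to be
 * consumed (illustrative only, not part of this file): splice the
 * per-cpu file into a pipe and the pipe into a log file, so whole
 * ring-buffer pages move without an intermediate user-space copy.
 *
 *	int p[2], fd, out;
 *
 *	pipe(p);
 *	fd  = open("per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
 *	out = open("trace.raw", O_WRONLY | O_CREAT, 0644);
 *	for (;;) {
 *		ssize_t n = splice(fd, NULL, p[1], NULL, 4096, 0);
 *		if (n <= 0)
 *			break;
 *		splice(p[0], NULL, out, NULL, n, 0);
 *	}
 */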
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

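/*
 * tracing_stats_read() implements the per_cpu/cpuX/stats file: it
 * formats the per-cpu ring buffer counters (entries, overruns, bytes,
 * timestamps, dropped and read events) into a trace_seq and copies
 * the result out to user space.
 */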
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};

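/*
 * Parse the "snapshot" function command written to set_ftrace_filter,
 * e.g. "<function>:snapshot" or "<function>:snapshot:<count>"; a
 * leading '!' unregisters a previously registered probe. The optional
 * count limits how many snapshots the probe will take.
 */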
static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
	if (WARN_ON(!tr->dir))
		return ERR_PTR(-ENODEV);

	/* Top directory uses NULL as the parent */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return NULL;

	/* All sub buffers have a descriptor */
	return tr->dir;
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		d_inode(ret)->i_cdev = (void *)(cpu + 1);
	return ret;
}

static void
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

/*
 * In order to pass in both the trace_array descriptor as well as the index
 * to the flag that the trace option file represents, the trace_array
 * has a character array of trace_flags_index[], which holds the index
 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 * The address of this character array is passed to the flag option file
 * read/write callbacks.
 *
 * In order to extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm.
 *
 *	idx = *ptr;
 *
 * As the pointer itself contains the address of the index (remember
 * index[1] == 1).
 *
 * Then to get the trace_array descriptor, by subtracting that index
 * from the ptr, we get to the start of the index itself.
 *
 *	ptr - idx == &index[0]
 *
 * Then a simple container_of() from that pointer gets us to the
 * trace_array descriptor.
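 *
 * For example (values are illustrative): if data points at
 * tr->trace_flags_index[3], then *data == 3, so the flag index is 3,
 * and data - 3 is &tr->trace_flags_index[0], from which container_of()
 * recovers the enclosing trace_array tr.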
 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;

	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	char *buf;

	get_tr_index(tr_index, &tr, &index);

	if (tr->trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek	= generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warn("Could not create tracefs '%s' entry\n", name);

	return ret;
}

static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warn("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/* Make sure there are no duplicate flags. */
		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
			return;
	}

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		WARN_ONCE(topts[cnt].entry == NULL,
			  "Failed to create trace option: %s",
			  opts[cnt].name);
	}
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options,
				 (void *)&tr->trace_flags_index[index],
				 &trace_options_core_fops);
}

static void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	bool top_level = tr == &global_trace;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		if (top_level ||
		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
			create_trace_option_core_file(tr, trace_options[i], i);
	}
}

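/*
 * Handlers for the "tracing_on" file: reading reports whether the
 * ring buffer is recording, and writing 0/1 turns recording off/on,
 * also invoking the current tracer's stop()/start() callbacks.
 */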
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};

struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

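/*
 * Allocate one trace_buffer: the ring buffer itself plus the per-CPU
 * trace_array_cpu data. With CONFIG_TRACER_MAX_TRACE,
 * allocate_trace_buffers() below also sets up the max_buffer used for
 * snapshots (sized to a single page unless a boot-time snapshot was
 * requested).
 */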
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}

static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}

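/*
 * instance_mkdir() is the mkdir callback for the tracefs "instances"
 * directory: it allocates and wires up a brand new trace_array with
 * its own buffers, event directories and option files.
 */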
static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;
	int i;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	/* Disable all the flags that were enabled coming in */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
		if ((1 << i) & ZEROED_TRACE_FLAGS)
			set_tracer_flag(tr, 1 << i, 0);
	}

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

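/*
 * The instances directory makes new trace arrays available from user
 * space with plain mkdir/rmdir (a sketch, assuming tracefs is mounted
 * at the usual location):
 *
 *	mkdir /sys/kernel/tracing/instances/foo    -> instance_mkdir("foo")
 *	rmdir /sys/kernel/tracing/instances/foo    -> instance_rmdir("foo")
 */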
static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}

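/*
 * Populate a tracefs directory with the per-instance control files
 * (current_tracer, trace, trace_pipe, tracing_on, and friends). This
 * runs both for the top level tracing directory and for every
 * directory created under "instances", so each instance gets its own
 * full set of control files.
 */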
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_marker_raw", 0220, d_tracer,
			  tr, &tracing_mark_raw_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}

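/*
 * Automount callback handed to debugfs below. A sketch of the effect,
 * assuming the usual mount points: the first access to
 * /sys/kernel/debug/tracing transparently mounts tracefs there, so the
 * same files that live at /sys/kernel/tracing keep working for older
 * tools that only know the debugfs path.
 */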
static struct vfsmount *trace_automount(void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!tracefs_initialized()) ||
	    (IS_ENABLED(CONFIG_DEBUG_FS) &&
	     WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return NULL;
}

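/*
 * Enum maps, as generated by TRACE_DEFINE_ENUM(), are gathered by the
 * linker into one section bracketed by the two symbols below.
 * trace_enum_init() walks that section so that enum names appearing in
 * event print formats can be resolved to their numeric values for
 * userspace parsers.
 */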
extern struct trace_enum_map *__start_ftrace_enum_maps[];
extern struct trace_enum_map *__stop_ftrace_enum_maps[];

static void __init trace_enum_init(void)
{
	int len;

	len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
	trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
}

#ifdef CONFIG_MODULES
static void trace_module_add_enums(struct module *mod)
{
	if (!mod->num_trace_enums)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
}

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
static void trace_module_remove_enums(struct module *mod)
{
	union trace_enum_map_item *map;
	union trace_enum_map_item **last = &trace_enum_maps;

	if (!mod->num_trace_enums)
		return;

	mutex_lock(&trace_enum_mutex);

	map = trace_enum_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_enum_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_enum_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_enum_mutex);
}
#else
static inline void trace_module_remove_enums(struct module *mod) { }
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

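/*
 * Module notifier: a module's enum maps are folded in when it loads
 * (MODULE_STATE_COMING) and, when CONFIG_TRACE_ENUM_MAP_FILE keeps the
 * maps around, unlinked again when it unloads (MODULE_STATE_GOING).
 */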
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_enums(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_enums(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

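/*
 * Late initialization of the tracefs files that need the file system
 * to be up. Registered as an fs_initcall() at the bottom of this file,
 * it fills in the top level tracing directory for the global trace
 * array and creates the "instances" directory.
 */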
static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	init_tracer_tracefs(&global_trace, d_tracer);
	ftrace_init_tracefs_toplevel(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_enum_init();

	trace_create_enum_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}

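/*
 * Panic and die notifiers: when ftrace_dump_on_oops is set (for
 * example via the ftrace_dump_on_oops kernel command line parameter),
 * the ring buffer contents are dumped to the console on a panic or a
 * fatal fault, before the machine goes down.
 */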
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

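/*
 * Write the contents of a trace_seq straight to the console with
 * printk() at KERN_TRACE level, clamping the length defensively, then
 * reinitialize the seq for the next chunk. Used by the dump path
 * below, which cannot go through the normal file interfaces.
 */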
void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* The string should already be NUL terminated, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

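/*
 * Set up an iterator over the global trace array without any file
 * being opened. This is what lets ftrace_dump() below (and in-kernel
 * debugger hooks such as kdb's buffer dump) walk the buffers outside
 * the usual read paths.
 */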
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

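/*
 * Dump the ring buffer(s) to the console. Reached from the panic and
 * die notifiers above and from sysrq-z; oops_dump_mode selects whether
 * every CPU's buffer (DUMP_ALL) or only the current CPU's (DUMP_ORIG)
 * gets printed.
 */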
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We read all that we can,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

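/*
 * Allocate and wire up the global trace buffers at boot. A failure in
 * any step unwinds the earlier allocations through the out_free_*
 * labels at the bottom of the function.
 */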
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

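/*
 * Early tracing entry point, called from the boot code: set up the
 * tracepoint_printk iterator if it was requested at boot, then
 * allocate the buffers and initialize the event subsystem.
 */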
void __init trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
	trace_event_init();
}

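/*
 * Runs at late_initcall time (see below): if a tracer named on the
 * command line never registered, drop the stale pointer so nothing
 * later dereferences its soon-to-be-freed init-section string.
 */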
__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called at lateinit time. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall(clear_boot_tracer);