blob: aee11e3a394f305d8829a4f0326d0d9999d8a74f [file] [log] [blame]
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001/*
2 * ring buffer based function tracer
3 *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +010012 * Copyright (C) 2004 Nadia Yvette Chambers
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020013 */
Steven Rostedt2cadf912008-12-01 22:20:19 -050014#include <linux/ring_buffer.h>
Sam Ravnborg273b2812009-10-18 00:52:28 +020015#include <generated/utsrelease.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050016#include <linux/stacktrace.h>
17#include <linux/writeback.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020018#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040020#include <linux/notifier.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050021#include <linux/irqflags.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020022#include <linux/debugfs.h>
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -050023#include <linux/tracefs.h>
Steven Rostedt4c11d7a2008-05-12 21:20:43 +020024#include <linux/pagemap.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020025#include <linux/hardirq.h>
26#include <linux/linkage.h>
27#include <linux/uaccess.h>
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -040028#include <linux/vmalloc.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020029#include <linux/ftrace.h>
30#include <linux/module.h>
31#include <linux/percpu.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050032#include <linux/splice.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040033#include <linux/kdebug.h>
Frederic Weisbecker5f0c6c02009-03-27 14:22:10 +010034#include <linux/string.h>
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -050035#include <linux/mount.h>
Lai Jiangshan7e53bd42010-01-06 20:08:50 +080036#include <linux/rwsem.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090037#include <linux/slab.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020038#include <linux/ctype.h>
39#include <linux/init.h>
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +020040#include <linux/poll.h>
Steven Rostedtb892e5c2012-03-01 22:06:48 -050041#include <linux/nmi.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020042#include <linux/fs.h>
Chunyan Zhang478409d2016-11-21 15:57:18 +080043#include <linux/trace.h>
Clark Williams8bd75c72013-02-07 09:47:07 -060044#include <linux/sched/rt.h>
Ingo Molnar86387f72008-05-12 21:20:51 +020045
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020046#include "trace.h"
Steven Rostedtf0868d12008-12-23 23:24:12 -050047#include "trace_output.h"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020048
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010049/*
Steven Rostedt73c51622009-03-11 13:42:01 -040050 * On boot up, the ring buffer is set to the minimum size, so that
51 * we do not waste memory on systems that are not using tracing.
52 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -050053bool ring_buffer_expanded;
Steven Rostedt73c51622009-03-11 13:42:01 -040054
55/*
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010056 * We need to change this state when a selftest is running.
Frederic Weisbeckerff325042008-12-04 23:47:35 +010057 * A selftest will lurk into the ring-buffer to count the
58 * entries inserted during the selftest although some concurrent
Ingo Molnar5e1607a2009-03-05 10:24:48 +010059 * insertions into the ring-buffer such as trace_printk could occurred
Frederic Weisbeckerff325042008-12-04 23:47:35 +010060 * at the same time, giving false positive or negative results.
61 */
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010062static bool __read_mostly tracing_selftest_running;
Frederic Weisbeckerff325042008-12-04 23:47:35 +010063
Steven Rostedtb2821ae2009-02-02 21:38:32 -050064/*
65 * If a tracer is running, we do not want to run SELFTEST.
66 */
Li Zefan020e5f82009-07-01 10:47:05 +080067bool __read_mostly tracing_selftest_disabled;
Steven Rostedtb2821ae2009-02-02 21:38:32 -050068
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -050069/* Pipe tracepoints to printk */
70struct trace_iterator *tracepoint_print_iter;
71int tracepoint_printk;
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -050072static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -050073
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +010074/* For tracers that don't implement custom flags */
75static struct tracer_opt dummy_tracer_opt[] = {
76 { }
77};
78
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -050079static int
80dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +010081{
82 return 0;
83}
Steven Rostedt0f048702008-11-05 16:05:44 -050084
85/*
Steven Rostedt7ffbd482012-10-11 12:14:25 -040086 * To prevent the comm cache from being overwritten when no
87 * tracing is active, only save the comm when a trace event
88 * occurred.
89 */
Joel Fernandesd914ba32017-06-26 19:01:55 -070090static DEFINE_PER_CPU(bool, trace_taskinfo_save);
Steven Rostedt7ffbd482012-10-11 12:14:25 -040091
92/*
Steven Rostedt0f048702008-11-05 16:05:44 -050093 * Kill all tracing for good (never come back).
94 * It is initialized to 1 but will turn to zero if the initialization
95 * of the tracer is successful. But that is the only place that sets
96 * this back to zero.
97 */
Hannes Eder4fd27352009-02-10 19:44:12 +010098static int tracing_disabled = 1;
Steven Rostedt0f048702008-11-05 16:05:44 -050099
Jason Wessel955b61e2010-08-05 09:22:23 -0500100cpumask_var_t __read_mostly tracing_buffer_mask;
Steven Rostedtab464282008-05-12 21:21:00 +0200101
Steven Rostedt944ac422008-10-23 19:26:08 -0400102/*
103 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
104 *
105 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
106 * is set, then ftrace_dump is called. This will output the contents
107 * of the ftrace buffers to the console. This is very useful for
108 * capturing traces that lead to crashes and outputing it to a
109 * serial console.
110 *
111 * It is default off, but you can enable it with either specifying
112 * "ftrace_dump_on_oops" in the kernel command line, or setting
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200113 * /proc/sys/kernel/ftrace_dump_on_oops
114 * Set 1 if you want to dump buffers of all CPUs
115 * Set 2 if you want to dump the buffer of the CPU that triggered oops
Steven Rostedt944ac422008-10-23 19:26:08 -0400116 */
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200117
118enum ftrace_dump_mode ftrace_dump_on_oops;
Steven Rostedt944ac422008-10-23 19:26:08 -0400119
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400120/* When set, tracing will stop when a WARN*() is hit */
121int __disable_trace_on_warning;
122
Jeremy Linton681bec02017-05-31 16:56:53 -0500123#ifdef CONFIG_TRACE_EVAL_MAP_FILE
124/* Map of enums to their values, for "eval_map" file */
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500125struct trace_eval_map_head {
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400126 struct module *mod;
127 unsigned long length;
128};
129
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500130union trace_eval_map_item;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400131
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500132struct trace_eval_map_tail {
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400133 /*
134 * "end" is first and points to NULL as it must be different
Jeremy Linton00f4b652017-05-31 16:56:43 -0500135 * than "mod" or "eval_string"
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400136 */
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500137 union trace_eval_map_item *next;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400138 const char *end; /* points to NULL */
139};
140
Jeremy Linton1793ed92017-05-31 16:56:46 -0500141static DEFINE_MUTEX(trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400142
143/*
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500144 * The trace_eval_maps are saved in an array with two extra elements,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400145 * one at the beginning, and one at the end. The beginning item contains
146 * the count of the saved maps (head.length), and the module they
147 * belong to if not built in (head.mod). The ending item contains a
Jeremy Linton681bec02017-05-31 16:56:53 -0500148 * pointer to the next array of saved eval_map items.
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400149 */
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500150union trace_eval_map_item {
Jeremy Linton00f4b652017-05-31 16:56:43 -0500151 struct trace_eval_map map;
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500152 struct trace_eval_map_head head;
153 struct trace_eval_map_tail tail;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400154};
155
Jeremy Linton23bf8cb2017-05-31 16:56:45 -0500156static union trace_eval_map_item *trace_eval_maps;
Jeremy Linton681bec02017-05-31 16:56:53 -0500157#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -0400158
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -0500159static int tracing_set_tracer(struct trace_array *tr, const char *buf);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500160
Li Zefanee6c2c12009-09-18 14:06:47 +0800161#define MAX_TRACER_SIZE 100
162static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500163static char *default_bootup_tracer;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100164
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500165static bool allocate_snapshot;
166
Frederic Weisbecker1beee962009-10-14 20:50:32 +0200167static int __init set_cmdline_ftrace(char *str)
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100168{
Chen Gang67012ab2013-04-08 12:06:44 +0800169 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500170 default_bootup_tracer = bootup_tracer_buf;
Steven Rostedt73c51622009-03-11 13:42:01 -0400171 /* We are using ftrace early, expand it */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500172 ring_buffer_expanded = true;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100173 return 1;
174}
Frederic Weisbecker1beee962009-10-14 20:50:32 +0200175__setup("ftrace=", set_cmdline_ftrace);
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100176
Steven Rostedt944ac422008-10-23 19:26:08 -0400177static int __init set_ftrace_dump_on_oops(char *str)
178{
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200179 if (*str++ != '=' || !*str) {
180 ftrace_dump_on_oops = DUMP_ALL;
181 return 1;
182 }
183
184 if (!strcmp("orig_cpu", str)) {
185 ftrace_dump_on_oops = DUMP_ORIG;
186 return 1;
187 }
188
189 return 0;
Steven Rostedt944ac422008-10-23 19:26:08 -0400190}
191__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
Steven Rostedt60a11772008-05-12 21:20:44 +0200192
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400193static int __init stop_trace_on_warning(char *str)
194{
Luis Claudio R. Goncalves933ff9f2014-11-12 21:14:00 -0200195 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
196 __disable_trace_on_warning = 1;
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400197 return 1;
198}
Luis Claudio R. Goncalves933ff9f2014-11-12 21:14:00 -0200199__setup("traceoff_on_warning", stop_trace_on_warning);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400200
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400201static int __init boot_alloc_snapshot(char *str)
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500202{
203 allocate_snapshot = true;
204 /* We also need the main ring buffer expanded */
205 ring_buffer_expanded = true;
206 return 1;
207}
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400208__setup("alloc_snapshot", boot_alloc_snapshot);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500209
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400210
211static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400212
213static int __init set_trace_boot_options(char *str)
214{
Chen Gang67012ab2013-04-08 12:06:44 +0800215 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400216 return 0;
217}
218__setup("trace_options=", set_trace_boot_options);
219
Steven Rostedte1e232c2014-02-10 23:38:46 -0500220static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
221static char *trace_boot_clock __initdata;
222
223static int __init set_trace_boot_clock(char *str)
224{
225 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
226 trace_boot_clock = trace_boot_clock_buf;
227 return 0;
228}
229__setup("trace_clock=", set_trace_boot_clock);
230
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -0500231static int __init set_tracepoint_printk(char *str)
232{
233 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
234 tracepoint_printk = 1;
235 return 1;
236}
237__setup("tp_printk", set_tracepoint_printk);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400238
Thomas Gleixnera5a1d1c2016-12-21 20:32:01 +0100239unsigned long long ns2usecs(u64 nsec)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200240{
241 nsec += 500;
242 do_div(nsec, 1000);
243 return nsec;
244}
245
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400246/* trace_flags holds trace_options default values */
247#define TRACE_DEFAULT_FLAGS \
248 (FUNCTION_DEFAULT_FLAGS | \
249 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
250 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
251 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
252 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
253
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -0400254/* trace_options that are only supported by global_trace */
255#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
256 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
257
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -0400258/* trace_flags that are default zero for instances */
259#define ZEROED_TRACE_FLAGS \
Namhyung Kim1e104862017-04-17 11:44:28 +0900260 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -0400261
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200262/*
Joel Fernandes67d04bb2017-02-16 20:10:58 -0800263 * The global_trace is the descriptor that holds the top-level tracing
264 * buffers for the live tracing.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200265 */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400266static struct trace_array global_trace = {
267 .trace_flags = TRACE_DEFAULT_FLAGS,
268};
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200269
Steven Rostedtae63b31e2012-05-03 23:09:03 -0400270LIST_HEAD(ftrace_trace_arrays);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200271
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -0400272int trace_array_get(struct trace_array *this_tr)
273{
274 struct trace_array *tr;
275 int ret = -ENODEV;
276
277 mutex_lock(&trace_types_lock);
278 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
279 if (tr == this_tr) {
280 tr->ref++;
281 ret = 0;
282 break;
283 }
284 }
285 mutex_unlock(&trace_types_lock);
286
287 return ret;
288}
289
290static void __trace_array_put(struct trace_array *this_tr)
291{
292 WARN_ON(!this_tr->ref);
293 this_tr->ref--;
294}
295
296void trace_array_put(struct trace_array *this_tr)
297{
298 mutex_lock(&trace_types_lock);
299 __trace_array_put(this_tr);
300 mutex_unlock(&trace_types_lock);
301}
302
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -0400303int call_filter_check_discard(struct trace_event_call *call, void *rec,
Tom Zanussif306cc82013-10-24 08:34:17 -0500304 struct ring_buffer *buffer,
305 struct ring_buffer_event *event)
306{
307 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
308 !filter_match_preds(call->filter, rec)) {
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -0400309 __trace_event_discard_commit(buffer, event);
Tom Zanussif306cc82013-10-24 08:34:17 -0500310 return 1;
311 }
312
313 return 0;
314}
Tom Zanussieb02ce02009-04-08 03:15:54 -0500315
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400316void trace_free_pid_list(struct trace_pid_list *pid_list)
317{
318 vfree(pid_list->pids);
319 kfree(pid_list);
320}
321
Steven Rostedtd8275c42016-04-14 12:15:22 -0400322/**
323 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
324 * @filtered_pids: The list of pids to check
325 * @search_pid: The PID to find in @filtered_pids
326 *
327 * Returns true if @search_pid is fonud in @filtered_pids, and false otherwis.
328 */
329bool
330trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
331{
332 /*
333 * If pid_max changed after filtered_pids was created, we
334 * by default ignore all pids greater than the previous pid_max.
335 */
336 if (search_pid >= filtered_pids->pid_max)
337 return false;
338
339 return test_bit(search_pid, filtered_pids->pids);
340}
341
342/**
343 * trace_ignore_this_task - should a task be ignored for tracing
344 * @filtered_pids: The list of pids to check
345 * @task: The task that should be ignored if not filtered
346 *
347 * Checks if @task should be traced or not from @filtered_pids.
348 * Returns true if @task should *NOT* be traced.
349 * Returns false if @task should be traced.
350 */
351bool
352trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
353{
354 /*
355 * Return false, because if filtered_pids does not exist,
356 * all pids are good to trace.
357 */
358 if (!filtered_pids)
359 return false;
360
361 return !trace_find_filtered_pid(filtered_pids, task->pid);
362}
363
364/**
365 * trace_pid_filter_add_remove - Add or remove a task from a pid_list
366 * @pid_list: The list to modify
367 * @self: The current task for fork or NULL for exit
368 * @task: The task to add or remove
369 *
370 * If adding a task, if @self is defined, the task is only added if @self
371 * is also included in @pid_list. This happens on fork and tasks should
372 * only be added when the parent is listed. If @self is NULL, then the
373 * @task pid will be removed from the list, which would happen on exit
374 * of a task.
375 */
376void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
377 struct task_struct *self,
378 struct task_struct *task)
379{
380 if (!pid_list)
381 return;
382
383 /* For forks, we only add if the forking task is listed */
384 if (self) {
385 if (!trace_find_filtered_pid(pid_list, self->pid))
386 return;
387 }
388
389 /* Sorry, but we don't support pid_max changing after setting */
390 if (task->pid >= pid_list->pid_max)
391 return;
392
393 /* "self" is set for forks, and NULL for exits */
394 if (self)
395 set_bit(task->pid, pid_list->pids);
396 else
397 clear_bit(task->pid, pid_list->pids);
398}
399
Steven Rostedt (Red Hat)5cc89762016-04-20 15:19:54 -0400400/**
401 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
402 * @pid_list: The pid list to show
403 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
404 * @pos: The position of the file
405 *
406 * This is used by the seq_file "next" operation to iterate the pids
407 * listed in a trace_pid_list structure.
408 *
409 * Returns the pid+1 as we want to display pid of zero, but NULL would
410 * stop the iteration.
411 */
412void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
413{
414 unsigned long pid = (unsigned long)v;
415
416 (*pos)++;
417
418 /* pid already is +1 of the actual prevous bit */
419 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
420
421 /* Return pid + 1 to allow zero to be represented */
422 if (pid < pid_list->pid_max)
423 return (void *)(pid + 1);
424
425 return NULL;
426}
427
428/**
429 * trace_pid_start - Used for seq_file to start reading pid lists
430 * @pid_list: The pid list to show
431 * @pos: The position of the file
432 *
433 * This is used by seq_file "start" operation to start the iteration
434 * of listing pids.
435 *
436 * Returns the pid+1 as we want to display pid of zero, but NULL would
437 * stop the iteration.
438 */
439void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
440{
441 unsigned long pid;
442 loff_t l = 0;
443
444 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
445 if (pid >= pid_list->pid_max)
446 return NULL;
447
448 /* Return pid + 1 so that zero can be the exit value */
449 for (pid++; pid && l < *pos;
450 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
451 ;
452 return (void *)pid;
453}
454
455/**
456 * trace_pid_show - show the current pid in seq_file processing
457 * @m: The seq_file structure to write into
458 * @v: A void pointer of the pid (+1) value to display
459 *
460 * Can be directly used by seq_file operations to display the current
461 * pid value.
462 */
463int trace_pid_show(struct seq_file *m, void *v)
464{
465 unsigned long pid = (unsigned long)v - 1;
466
467 seq_printf(m, "%lu\n", pid);
468 return 0;
469}
470
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400471/* 128 should be much more than enough */
472#define PID_BUF_SIZE 127
473
474int trace_pid_write(struct trace_pid_list *filtered_pids,
475 struct trace_pid_list **new_pid_list,
476 const char __user *ubuf, size_t cnt)
477{
478 struct trace_pid_list *pid_list;
479 struct trace_parser parser;
480 unsigned long val;
481 int nr_pids = 0;
482 ssize_t read = 0;
483 ssize_t ret = 0;
484 loff_t pos;
485 pid_t pid;
486
487 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
488 return -ENOMEM;
489
490 /*
491 * Always recreate a new array. The write is an all or nothing
492 * operation. Always create a new array when adding new pids by
493 * the user. If the operation fails, then the current list is
494 * not modified.
495 */
496 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
497 if (!pid_list)
498 return -ENOMEM;
499
500 pid_list->pid_max = READ_ONCE(pid_max);
501
502 /* Only truncating will shrink pid_max */
503 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
504 pid_list->pid_max = filtered_pids->pid_max;
505
506 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
507 if (!pid_list->pids) {
508 kfree(pid_list);
509 return -ENOMEM;
510 }
511
512 if (filtered_pids) {
513 /* copy the current bits to the new max */
Wei Yongjun67f20b02016-07-04 15:10:04 +0000514 for_each_set_bit(pid, filtered_pids->pids,
515 filtered_pids->pid_max) {
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400516 set_bit(pid, pid_list->pids);
Steven Rostedt (Red Hat)76c813e2016-04-21 11:35:30 -0400517 nr_pids++;
518 }
519 }
520
521 while (cnt > 0) {
522
523 pos = 0;
524
525 ret = trace_get_user(&parser, ubuf, cnt, &pos);
526 if (ret < 0 || !trace_parser_loaded(&parser))
527 break;
528
529 read += ret;
530 ubuf += ret;
531 cnt -= ret;
532
533 parser.buffer[parser.idx] = 0;
534
535 ret = -EINVAL;
536 if (kstrtoul(parser.buffer, 0, &val))
537 break;
538 if (val >= pid_list->pid_max)
539 break;
540
541 pid = (pid_t)val;
542
543 set_bit(pid, pid_list->pids);
544 nr_pids++;
545
546 trace_parser_clear(&parser);
547 ret = 0;
548 }
549 trace_parser_put(&parser);
550
551 if (ret < 0) {
552 trace_free_pid_list(pid_list);
553 return ret;
554 }
555
556 if (!nr_pids) {
557 /* Cleared the list of pids */
558 trace_free_pid_list(pid_list);
559 read = ret;
560 pid_list = NULL;
561 }
562
563 *new_pid_list = pid_list;
564
565 return read;
566}
567
Thomas Gleixnera5a1d1c2016-12-21 20:32:01 +0100568static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
Steven Rostedt37886f62009-03-17 17:22:06 -0400569{
570 u64 ts;
571
572 /* Early boot up does not have a buffer yet */
Alexander Z Lam94571582013-08-02 18:36:16 -0700573 if (!buf->buffer)
Steven Rostedt37886f62009-03-17 17:22:06 -0400574 return trace_clock_local();
575
Alexander Z Lam94571582013-08-02 18:36:16 -0700576 ts = ring_buffer_time_stamp(buf->buffer, cpu);
577 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
Steven Rostedt37886f62009-03-17 17:22:06 -0400578
579 return ts;
580}
581
Thomas Gleixnera5a1d1c2016-12-21 20:32:01 +0100582u64 ftrace_now(int cpu)
Alexander Z Lam94571582013-08-02 18:36:16 -0700583{
584 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
585}
586
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400587/**
588 * tracing_is_enabled - Show if global_trace has been disabled
589 *
590 * Shows if the global trace has been enabled or not. It uses the
591 * mirror flag "buffer_disabled" to be used in fast paths such as for
592 * the irqsoff tracer. But it may be inaccurate due to races. If you
593 * need to know the accurate state, use tracing_is_on() which is a little
594 * slower, but accurate.
595 */
Steven Rostedt90369902008-11-05 16:05:44 -0500596int tracing_is_enabled(void)
597{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400598 /*
599 * For quick access (irqsoff uses this in fast path), just
600 * return the mirror variable of the state of the ring buffer.
601 * It's a little racy, but we don't really care.
602 */
603 smp_rmb();
604 return !global_trace.buffer_disabled;
Steven Rostedt90369902008-11-05 16:05:44 -0500605}
606
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200607/*
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400608 * trace_buf_size is the size in bytes that is allocated
609 * for a buffer. Note, the number of bytes is always rounded
610 * to page size.
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400611 *
612 * This number is purposely set to a low number of 16384.
613 * If the dump on oops happens, it will be much appreciated
614 * to not have to wait for all that output. Anyway this can be
615 * boot time and run time configurable.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200616 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400617#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400618
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400619static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200620
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200621/* trace_types holds a link list of available tracers. */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200622static struct tracer *trace_types __read_mostly;
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200623
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200624/*
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200625 * trace_types_lock is used to protect the trace_types list.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200626 */
Alexander Z Lama8227412013-07-01 19:37:54 -0700627DEFINE_MUTEX(trace_types_lock);
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200628
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800629/*
630 * serialize the access of the ring buffer
631 *
632 * ring buffer serializes readers, but it is low level protection.
633 * The validity of the events (which returns by ring_buffer_peek() ..etc)
634 * are not protected by ring buffer.
635 *
636 * The content of events may become garbage if we allow other process consumes
637 * these events concurrently:
638 * A) the page of the consumed events may become a normal page
639 * (not reader page) in ring buffer, and this page will be rewrited
640 * by events producer.
641 * B) The page of the consumed events may become a page for splice_read,
642 * and this page will be returned to system.
643 *
644 * These primitives allow multi process access to different cpu ring buffer
645 * concurrently.
646 *
647 * These primitives don't distinguish read-only and read-consume access.
648 * Multi read-only access are also serialized.
649 */
650
651#ifdef CONFIG_SMP
652static DECLARE_RWSEM(all_cpu_access_lock);
653static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
654
655static inline void trace_access_lock(int cpu)
656{
Steven Rostedtae3b5092013-01-23 15:22:59 -0500657 if (cpu == RING_BUFFER_ALL_CPUS) {
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800658 /* gain it for accessing the whole ring buffer. */
659 down_write(&all_cpu_access_lock);
660 } else {
661 /* gain it for accessing a cpu ring buffer. */
662
Steven Rostedtae3b5092013-01-23 15:22:59 -0500663 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800664 down_read(&all_cpu_access_lock);
665
666 /* Secondly block other access to this @cpu ring buffer. */
667 mutex_lock(&per_cpu(cpu_access_lock, cpu));
668 }
669}
670
671static inline void trace_access_unlock(int cpu)
672{
Steven Rostedtae3b5092013-01-23 15:22:59 -0500673 if (cpu == RING_BUFFER_ALL_CPUS) {
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800674 up_write(&all_cpu_access_lock);
675 } else {
676 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
677 up_read(&all_cpu_access_lock);
678 }
679}
680
681static inline void trace_access_lock_init(void)
682{
683 int cpu;
684
685 for_each_possible_cpu(cpu)
686 mutex_init(&per_cpu(cpu_access_lock, cpu));
687}
688
689#else
690
691static DEFINE_MUTEX(access_lock);
692
693static inline void trace_access_lock(int cpu)
694{
695 (void)cpu;
696 mutex_lock(&access_lock);
697}
698
699static inline void trace_access_unlock(int cpu)
700{
701 (void)cpu;
702 mutex_unlock(&access_lock);
703}
704
705static inline void trace_access_lock_init(void)
706{
707}
708
709#endif
710
Steven Rostedt (Red Hat)d78a4612015-09-25 13:30:47 -0400711#ifdef CONFIG_STACKTRACE
712static void __ftrace_trace_stack(struct ring_buffer *buffer,
713 unsigned long flags,
714 int skip, int pc, struct pt_regs *regs);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -0400715static inline void ftrace_trace_stack(struct trace_array *tr,
716 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -0400717 unsigned long flags,
718 int skip, int pc, struct pt_regs *regs);
Steven Rostedt (Red Hat)ca475e82015-09-28 09:41:11 -0400719
Steven Rostedt (Red Hat)d78a4612015-09-25 13:30:47 -0400720#else
721static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
722 unsigned long flags,
723 int skip, int pc, struct pt_regs *regs)
724{
725}
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -0400726static inline void ftrace_trace_stack(struct trace_array *tr,
727 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -0400728 unsigned long flags,
729 int skip, int pc, struct pt_regs *regs)
Steven Rostedt (Red Hat)ca475e82015-09-28 09:41:11 -0400730{
731}
732
Steven Rostedt (Red Hat)d78a4612015-09-25 13:30:47 -0400733#endif
734
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -0500735static __always_inline void
736trace_event_setup(struct ring_buffer_event *event,
737 int type, unsigned long flags, int pc)
738{
739 struct trace_entry *ent = ring_buffer_event_data(event);
740
741 tracing_generic_entry_update(ent, flags, pc);
742 ent->type = type;
743}
744
745static __always_inline struct ring_buffer_event *
746__trace_buffer_lock_reserve(struct ring_buffer *buffer,
747 int type,
748 unsigned long len,
749 unsigned long flags, int pc)
750{
751 struct ring_buffer_event *event;
752
753 event = ring_buffer_lock_reserve(buffer, len);
754 if (event != NULL)
755 trace_event_setup(event, type, flags, pc);
756
757 return event;
758}
759
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -0400760void tracer_tracing_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400761{
762 if (tr->trace_buffer.buffer)
763 ring_buffer_record_on(tr->trace_buffer.buffer);
764 /*
765 * This flag is looked at when buffers haven't been allocated
766 * yet, or by some tracers (like irqsoff), that just want to
767 * know if the ring buffer has been disabled, but it can handle
768 * races of where it gets disabled but we still do a record.
769 * As the check is in the fast path of the tracers, it is more
770 * important to be fast than accurate.
771 */
772 tr->buffer_disabled = 0;
773 /* Make the flag seen by readers */
774 smp_wmb();
775}
776
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200777/**
Steven Rostedt499e5472012-02-22 15:50:28 -0500778 * tracing_on - enable tracing buffers
779 *
780 * This function enables tracing buffers that may have been
781 * disabled with tracing_off.
782 */
783void tracing_on(void)
784{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400785 tracer_tracing_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -0500786}
787EXPORT_SYMBOL_GPL(tracing_on);
788
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -0500789
790static __always_inline void
791__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
792{
Joel Fernandesd914ba32017-06-26 19:01:55 -0700793 __this_cpu_write(trace_taskinfo_save, true);
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -0500794
795 /* If this is the temp buffer, we need to commit fully */
796 if (this_cpu_read(trace_buffered_event) == event) {
797 /* Length is in event->array[0] */
798 ring_buffer_write(buffer, event->array[0], &event->array[1]);
799 /* Release the temp buffer */
800 this_cpu_dec(trace_buffered_event_cnt);
801 } else
802 ring_buffer_unlock_commit(buffer, event);
803}
804
Steven Rostedt499e5472012-02-22 15:50:28 -0500805/**
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500806 * __trace_puts - write a constant string into the trace buffer.
807 * @ip: The address of the caller
808 * @str: The constant string to write
809 * @size: The size of the string.
810 */
811int __trace_puts(unsigned long ip, const char *str, int size)
812{
813 struct ring_buffer_event *event;
814 struct ring_buffer *buffer;
815 struct print_entry *entry;
816 unsigned long irq_flags;
817 int alloc;
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800818 int pc;
819
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400820 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
zhangwei(Jovi)f0160a52013-07-18 16:31:18 +0800821 return 0;
822
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800823 pc = preempt_count();
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500824
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -0500825 if (unlikely(tracing_selftest_running || tracing_disabled))
826 return 0;
827
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500828 alloc = sizeof(*entry) + size + 2; /* possible \n added */
829
830 local_save_flags(irq_flags);
831 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -0500832 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
833 irq_flags, pc);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500834 if (!event)
835 return 0;
836
837 entry = ring_buffer_event_data(event);
838 entry->ip = ip;
839
840 memcpy(&entry->buf, str, size);
841
842 /* Add a newline if necessary */
843 if (entry->buf[size - 1] != '\n') {
844 entry->buf[size] = '\n';
845 entry->buf[size + 1] = '\0';
846 } else
847 entry->buf[size] = '\0';
848
849 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -0400850 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500851
852 return size;
853}
854EXPORT_SYMBOL_GPL(__trace_puts);
855
856/**
857 * __trace_bputs - write the pointer to a constant string into trace buffer
858 * @ip: The address of the caller
859 * @str: The constant string to write to the buffer to
860 */
861int __trace_bputs(unsigned long ip, const char *str)
862{
863 struct ring_buffer_event *event;
864 struct ring_buffer *buffer;
865 struct bputs_entry *entry;
866 unsigned long irq_flags;
867 int size = sizeof(struct bputs_entry);
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800868 int pc;
869
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400870 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
zhangwei(Jovi)f0160a52013-07-18 16:31:18 +0800871 return 0;
872
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800873 pc = preempt_count();
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500874
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -0500875 if (unlikely(tracing_selftest_running || tracing_disabled))
876 return 0;
877
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500878 local_save_flags(irq_flags);
879 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -0500880 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
881 irq_flags, pc);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500882 if (!event)
883 return 0;
884
885 entry = ring_buffer_event_data(event);
886 entry->ip = ip;
887 entry->str = str;
888
889 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -0400890 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500891
892 return 1;
893}
894EXPORT_SYMBOL_GPL(__trace_bputs);
895
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500896#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -0400897static void tracing_snapshot_instance(struct trace_array *tr)
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500898{
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500899 struct tracer *tracer = tr->current_trace;
900 unsigned long flags;
901
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500902 if (in_nmi()) {
903 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
904 internal_trace_puts("*** snapshot is being ignored ***\n");
905 return;
906 }
907
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500908 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)ca268da2013-03-09 00:40:58 -0500909 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
910 internal_trace_puts("*** stopping trace here! ***\n");
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500911 tracing_off();
912 return;
913 }
914
915 /* Note, snapshot can not be used when the tracer uses it */
916 if (tracer->use_max_tr) {
Steven Rostedt (Red Hat)ca268da2013-03-09 00:40:58 -0500917 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
918 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500919 return;
920 }
921
922 local_irq_save(flags);
923 update_max_tr(tr, current, smp_processor_id());
924 local_irq_restore(flags);
925}
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -0400926
927/**
928 * trace_snapshot - take a snapshot of the current buffer.
929 *
930 * This causes a swap between the snapshot buffer and the current live
931 * tracing buffer. You can use this to take snapshots of the live
932 * trace when some condition is triggered, but continue to trace.
933 *
934 * Note, make sure to allocate the snapshot with either
935 * a tracing_snapshot_alloc(), or by doing it manually
936 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
937 *
938 * If the snapshot buffer is not allocated, it will stop tracing.
939 * Basically making a permanent snapshot.
940 */
941void tracing_snapshot(void)
942{
943 struct trace_array *tr = &global_trace;
944
945 tracing_snapshot_instance(tr);
946}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500947EXPORT_SYMBOL_GPL(tracing_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500948
949static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
950 struct trace_buffer *size_buf, int cpu_id);
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400951static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
952
953static int alloc_snapshot(struct trace_array *tr)
954{
955 int ret;
956
957 if (!tr->allocated_snapshot) {
958
959 /* allocate spare buffer */
960 ret = resize_buffer_duplicate_size(&tr->max_buffer,
961 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
962 if (ret < 0)
963 return ret;
964
965 tr->allocated_snapshot = true;
966 }
967
968 return 0;
969}
970
Fabian Frederickad1438a2014-04-17 21:44:42 +0200971static void free_snapshot(struct trace_array *tr)
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400972{
973 /*
974 * We don't free the ring buffer. instead, resize it because
975 * The max_tr ring buffer has some state (e.g. ring->clock) and
976 * we want preserve it.
977 */
978 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
979 set_buffer_entries(&tr->max_buffer, 1);
980 tracing_reset_online_cpus(&tr->max_buffer);
981 tr->allocated_snapshot = false;
982}
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500983
984/**
Tom Zanussi93e31ff2013-10-24 08:59:26 -0500985 * tracing_alloc_snapshot - allocate snapshot buffer.
986 *
987 * This only allocates the snapshot buffer if it isn't already
988 * allocated - it doesn't also take a snapshot.
989 *
990 * This is meant to be used in cases where the snapshot buffer needs
991 * to be set up for events that can't sleep but need to be able to
992 * trigger a snapshot.
993 */
994int tracing_alloc_snapshot(void)
995{
996 struct trace_array *tr = &global_trace;
997 int ret;
998
999 ret = alloc_snapshot(tr);
1000 WARN_ON(ret < 0);
1001
1002 return ret;
1003}
1004EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1005
1006/**
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001007 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
1008 *
1009 * This is similar to trace_snapshot(), but it will allocate the
1010 * snapshot buffer if it isn't already allocated. Use this only
1011 * where it is safe to sleep, as the allocation may sleep.
1012 *
1013 * This causes a swap between the snapshot buffer and the current live
1014 * tracing buffer. You can use this to take snapshots of the live
1015 * trace when some condition is triggered, but continue to trace.
1016 */
1017void tracing_snapshot_alloc(void)
1018{
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001019 int ret;
1020
Tom Zanussi93e31ff2013-10-24 08:59:26 -05001021 ret = tracing_alloc_snapshot();
1022 if (ret < 0)
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04001023 return;
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001024
1025 tracing_snapshot();
1026}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001027EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001028#else
1029void tracing_snapshot(void)
1030{
1031 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1032}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001033EXPORT_SYMBOL_GPL(tracing_snapshot);
Tom Zanussi93e31ff2013-10-24 08:59:26 -05001034int tracing_alloc_snapshot(void)
1035{
1036 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1037 return -ENODEV;
1038}
1039EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001040void tracing_snapshot_alloc(void)
1041{
1042 /* Give warning */
1043 tracing_snapshot();
1044}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001045EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001046#endif /* CONFIG_TRACER_SNAPSHOT */
1047
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -04001048void tracer_tracing_off(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001049{
1050 if (tr->trace_buffer.buffer)
1051 ring_buffer_record_off(tr->trace_buffer.buffer);
1052 /*
1053 * This flag is looked at when buffers haven't been allocated
1054 * yet, or by some tracers (like irqsoff), that just want to
1055 * know if the ring buffer has been disabled, but it can handle
1056 * races of where it gets disabled but we still do a record.
1057 * As the check is in the fast path of the tracers, it is more
1058 * important to be fast than accurate.
1059 */
1060 tr->buffer_disabled = 1;
1061 /* Make the flag seen by readers */
1062 smp_wmb();
1063}
1064
Steven Rostedt499e5472012-02-22 15:50:28 -05001065/**
1066 * tracing_off - turn off tracing buffers
1067 *
1068 * This function stops the tracing buffers from recording data.
1069 * It does not disable any overhead the tracers themselves may
1070 * be causing. This function simply causes all recording to
1071 * the ring buffers to fail.
1072 */
1073void tracing_off(void)
1074{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001075 tracer_tracing_off(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001076}
1077EXPORT_SYMBOL_GPL(tracing_off);
1078
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001079void disable_trace_on_warning(void)
1080{
1081 if (__disable_trace_on_warning)
1082 tracing_off();
1083}
1084
Steven Rostedt499e5472012-02-22 15:50:28 -05001085/**
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001086 * tracer_tracing_is_on - show real state of ring buffer enabled
1087 * @tr : the trace array to know if ring buffer is enabled
1088 *
1089 * Shows real state of the ring buffer if it is enabled or not.
1090 */
Steven Rostedt (Red Hat)e7c15cd2016-06-23 12:45:36 -04001091int tracer_tracing_is_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001092{
1093 if (tr->trace_buffer.buffer)
1094 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1095 return !tr->buffer_disabled;
1096}
1097
Steven Rostedt499e5472012-02-22 15:50:28 -05001098/**
1099 * tracing_is_on - show state of ring buffers enabled
1100 */
1101int tracing_is_on(void)
1102{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001103 return tracer_tracing_is_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001104}
1105EXPORT_SYMBOL_GPL(tracing_is_on);
1106
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001107static int __init set_buf_size(char *str)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001108{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001109 unsigned long buf_size;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001110
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001111 if (!str)
1112 return 0;
Li Zefan9d612be2009-06-24 17:33:15 +08001113 buf_size = memparse(str, &str);
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001114 /* nr_entries can not be zero */
Li Zefan9d612be2009-06-24 17:33:15 +08001115 if (buf_size == 0)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001116 return 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001117 trace_buf_size = buf_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001118 return 1;
1119}
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001120__setup("trace_buf_size=", set_buf_size);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001121
Tim Bird0e950172010-02-25 15:36:43 -08001122static int __init set_tracing_thresh(char *str)
1123{
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001124 unsigned long threshold;
Tim Bird0e950172010-02-25 15:36:43 -08001125 int ret;
1126
1127 if (!str)
1128 return 0;
Daniel Walterbcd83ea2012-09-26 22:08:38 +02001129 ret = kstrtoul(str, 0, &threshold);
Tim Bird0e950172010-02-25 15:36:43 -08001130 if (ret < 0)
1131 return 0;
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001132 tracing_thresh = threshold * 1000;
Tim Bird0e950172010-02-25 15:36:43 -08001133 return 1;
1134}
1135__setup("tracing_thresh=", set_tracing_thresh);
1136
Steven Rostedt57f50be2008-05-12 21:20:44 +02001137unsigned long nsecs_to_usecs(unsigned long nsecs)
1138{
1139 return nsecs / 1000;
1140}
1141
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001142/*
1143 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001144 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001145 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001146 * of strings in the order that the evals (enum) were defined.
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001147 */
1148#undef C
1149#define C(a, b) b
1150
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001151/* These must match the bit postions in trace_iterator_flags */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001152static const char *trace_options[] = {
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001153 TRACE_FLAGS
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001154 NULL
1155};
1156
Zhaolei5079f322009-08-25 16:12:56 +08001157static struct {
1158 u64 (*func)(void);
1159 const char *name;
David Sharp8be07092012-11-13 12:18:22 -08001160 int in_ns; /* is this clock in nanoseconds? */
Zhaolei5079f322009-08-25 16:12:56 +08001161} trace_clocks[] = {
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001162 { trace_clock_local, "local", 1 },
1163 { trace_clock_global, "global", 1 },
1164 { trace_clock_counter, "counter", 0 },
Linus Torvaldse7fda6c2014-08-05 17:46:42 -07001165 { trace_clock_jiffies, "uptime", 0 },
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001166 { trace_clock, "perf", 1 },
1167 { ktime_get_mono_fast_ns, "mono", 1 },
Drew Richardsonaabfa5f2015-05-08 07:30:39 -07001168 { ktime_get_raw_fast_ns, "mono_raw", 1 },
Joel Fernandes80ec3552016-11-28 14:35:23 -08001169 { ktime_get_boot_fast_ns, "boot", 1 },
David Sharp8cbd9cc2012-11-13 12:18:21 -08001170 ARCH_TRACE_CLOCKS
Zhaolei5079f322009-08-25 16:12:56 +08001171};
1172
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001173/*
1174 * trace_parser_get_init - gets the buffer for trace parser
1175 */
1176int trace_parser_get_init(struct trace_parser *parser, int size)
1177{
1178 memset(parser, 0, sizeof(*parser));
1179
1180 parser->buffer = kmalloc(size, GFP_KERNEL);
1181 if (!parser->buffer)
1182 return 1;
1183
1184 parser->size = size;
1185 return 0;
1186}
1187
1188/*
1189 * trace_parser_put - frees the buffer for trace parser
1190 */
1191void trace_parser_put(struct trace_parser *parser)
1192{
1193 kfree(parser->buffer);
Steven Rostedt (VMware)0e684b62017-02-02 17:58:18 -05001194 parser->buffer = NULL;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001195}
1196
1197/*
1198 * trace_get_user - reads the user input string separated by space
1199 * (matched by isspace(ch))
1200 *
1201 * For each string found the 'struct trace_parser' is updated,
1202 * and the function returns.
1203 *
1204 * Returns number of bytes read.
1205 *
1206 * See kernel/trace/trace.h for 'struct trace_parser' details.
1207 */
1208int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1209 size_t cnt, loff_t *ppos)
1210{
1211 char ch;
1212 size_t read = 0;
1213 ssize_t ret;
1214
1215 if (!*ppos)
1216 trace_parser_clear(parser);
1217
1218 ret = get_user(ch, ubuf++);
1219 if (ret)
1220 goto out;
1221
1222 read++;
1223 cnt--;
1224
1225 /*
1226 * The parser is not finished with the last write,
1227 * continue reading the user input without skipping spaces.
1228 */
1229 if (!parser->cont) {
1230 /* skip white space */
1231 while (cnt && isspace(ch)) {
1232 ret = get_user(ch, ubuf++);
1233 if (ret)
1234 goto out;
1235 read++;
1236 cnt--;
1237 }
1238
1239 /* only spaces were written */
1240 if (isspace(ch)) {
1241 *ppos += read;
1242 ret = read;
1243 goto out;
1244 }
1245
1246 parser->idx = 0;
1247 }
1248
1249 /* read the non-space input */
1250 while (cnt && !isspace(ch)) {
Li Zefan3c235a32009-09-22 13:51:54 +08001251 if (parser->idx < parser->size - 1)
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001252 parser->buffer[parser->idx++] = ch;
1253 else {
1254 ret = -EINVAL;
1255 goto out;
1256 }
1257 ret = get_user(ch, ubuf++);
1258 if (ret)
1259 goto out;
1260 read++;
1261 cnt--;
1262 }
1263
1264 /* We either got finished input or we have to wait for another call. */
1265 if (isspace(ch)) {
1266 parser->buffer[parser->idx] = 0;
1267 parser->cont = false;
Steven Rostedt057db842013-10-09 22:23:23 -04001268 } else if (parser->idx < parser->size - 1) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001269 parser->cont = true;
1270 parser->buffer[parser->idx++] = ch;
Steven Rostedt057db842013-10-09 22:23:23 -04001271 } else {
1272 ret = -EINVAL;
1273 goto out;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001274 }
1275
1276 *ppos += read;
1277 ret = read;
1278
1279out:
1280 return ret;
1281}
1282
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001283/* TODO add a seq_buf_to_buffer() */
Dmitri Vorobievb8b94262009-03-22 19:11:11 +02001284static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001285{
1286 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001287
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001288 if (trace_seq_used(s) <= s->seq.readpos)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001289 return -EBUSY;
1290
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001291 len = trace_seq_used(s) - s->seq.readpos;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001292 if (cnt > len)
1293 cnt = len;
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001294 memcpy(buf, s->buffer + s->seq.readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001295
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001296 s->seq.readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001297 return cnt;
1298}
1299
Tim Bird0e950172010-02-25 15:36:43 -08001300unsigned long __read_mostly tracing_thresh;
1301
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001302#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001303/*
1304 * Copy the new maximum trace into the separate maximum-trace
1305 * structure. (this way the maximum trace is permanently saved,
1306 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1307 */
1308static void
1309__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1310{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001311 struct trace_buffer *trace_buf = &tr->trace_buffer;
1312 struct trace_buffer *max_buf = &tr->max_buffer;
1313 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1314 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001315
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001316 max_buf->cpu = cpu;
1317 max_buf->time_start = data->preempt_timestamp;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001318
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05001319 max_data->saved_latency = tr->max_latency;
Steven Rostedt8248ac02009-09-02 12:27:41 -04001320 max_data->critical_start = data->critical_start;
1321 max_data->critical_end = data->critical_end;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001322
Arnaldo Carvalho de Melo1acaa1b2010-03-05 18:23:50 -03001323 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
Steven Rostedt8248ac02009-09-02 12:27:41 -04001324 max_data->pid = tsk->pid;
Steven Rostedt (Red Hat)f17a5192013-05-30 21:10:37 -04001325 /*
1326 * If tsk == current, then use current_uid(), as that does not use
1327 * RCU. The irq tracer can be called out of RCU scope.
1328 */
1329 if (tsk == current)
1330 max_data->uid = current_uid();
1331 else
1332 max_data->uid = task_uid(tsk);
1333
Steven Rostedt8248ac02009-09-02 12:27:41 -04001334 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1335 max_data->policy = tsk->policy;
1336 max_data->rt_priority = tsk->rt_priority;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001337
1338 /* record this tasks comm */
1339 tracing_record_cmdline(tsk);
1340}
1341
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001342/**
1343 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1344 * @tr: tracer
1345 * @tsk: the task with the latency
1346 * @cpu: The cpu that initiated the trace.
1347 *
1348 * Flip the buffers between the @tr and the max_tr and record information
1349 * about which task was the cause of this latency.
1350 */
Ingo Molnare309b412008-05-12 21:20:51 +02001351void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001352update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1353{
Steven Rostedt (Red Hat)2721e722013-03-12 11:32:32 -04001354 struct ring_buffer *buf;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001355
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001356 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001357 return;
1358
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001359 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt34600f02013-01-22 13:35:11 -05001360
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05001361 if (!tr->allocated_snapshot) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001362 /* Only the nop tracer should hit this when disabling */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001363 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001364 return;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001365 }
Steven Rostedt34600f02013-01-22 13:35:11 -05001366
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001367 arch_spin_lock(&tr->max_lock);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001368
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001369 buf = tr->trace_buffer.buffer;
1370 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1371 tr->max_buffer.buffer = buf;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001372
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001373 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001374 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001375}
1376
1377/**
1378 * update_max_tr_single - only copy one trace over, and reset the rest
1379 * @tr - tracer
1380 * @tsk - task with the latency
1381 * @cpu - the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001382 *
1383 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001384 */
Ingo Molnare309b412008-05-12 21:20:51 +02001385void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001386update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1387{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001388 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001389
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001390 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001391 return;
1392
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001393 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001394 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001395 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001396 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001397 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001398 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001399
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001400 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001401
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001402 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001403
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001404 if (ret == -EBUSY) {
1405 /*
1406 * We failed to swap the buffer due to a commit taking
1407 * place on this CPU. We fail to record, but we reset
1408 * the max trace buffer (no one writes directly to it)
1409 * and flag that it failed.
1410 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001411 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001412 "Failed to swap buffers due to commit in progress\n");
1413 }
1414
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001415 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001416
1417 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001418 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001419}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001420#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001421
Rabin Vincente30f53a2014-11-10 19:46:34 +01001422static int wait_on_pipe(struct trace_iterator *iter, bool full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001423{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001424 /* Iterators are static, they should be filled or empty */
1425 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001426 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001427
Rabin Vincente30f53a2014-11-10 19:46:34 +01001428 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1429 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001430}
1431
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001432#ifdef CONFIG_FTRACE_STARTUP_TEST
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001433static bool selftests_can_run;
1434
1435struct trace_selftests {
1436 struct list_head list;
1437 struct tracer *type;
1438};
1439
1440static LIST_HEAD(postponed_selftests);
1441
1442static int save_selftest(struct tracer *type)
1443{
1444 struct trace_selftests *selftest;
1445
1446 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1447 if (!selftest)
1448 return -ENOMEM;
1449
1450 selftest->type = type;
1451 list_add(&selftest->list, &postponed_selftests);
1452 return 0;
1453}
1454
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001455static int run_tracer_selftest(struct tracer *type)
1456{
1457 struct trace_array *tr = &global_trace;
1458 struct tracer *saved_tracer = tr->current_trace;
1459 int ret;
1460
1461 if (!type->selftest || tracing_selftest_disabled)
1462 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001463
1464 /*
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001465 * If a tracer registers early in boot up (before scheduling is
1466 * initialized and such), then do not run its selftests yet.
1467 * Instead, run it a little later in the boot process.
1468 */
1469 if (!selftests_can_run)
1470 return save_selftest(type);
1471
1472 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001473 * Run a selftest on this tracer.
1474 * Here we reset the trace buffer, and set the current
1475 * tracer to be this tracer. The tracer can then run some
1476 * internal tracing to verify that everything is in order.
1477 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001478 */
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001479 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001480
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001481 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001482
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001483#ifdef CONFIG_TRACER_MAX_TRACE
1484 if (type->use_max_tr) {
1485 /* If we expanded the buffers, make sure the max is expanded too */
1486 if (ring_buffer_expanded)
1487 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1488 RING_BUFFER_ALL_CPUS);
1489 tr->allocated_snapshot = true;
1490 }
1491#endif
1492
1493 /* the test is responsible for initializing and enabling */
1494 pr_info("Testing tracer %s: ", type->name);
1495 ret = type->selftest(type, tr);
1496 /* the test is responsible for resetting too */
1497 tr->current_trace = saved_tracer;
1498 if (ret) {
1499 printk(KERN_CONT "FAILED!\n");
1500 /* Add the warning after printing 'FAILED' */
1501 WARN_ON(1);
1502 return -1;
1503 }
1504 /* Only reset on passing, to avoid touching corrupted buffers */
1505 tracing_reset_online_cpus(&tr->trace_buffer);
1506
1507#ifdef CONFIG_TRACER_MAX_TRACE
1508 if (type->use_max_tr) {
1509 tr->allocated_snapshot = false;
1510
1511 /* Shrink the max buffer again */
1512 if (ring_buffer_expanded)
1513 ring_buffer_resize(tr->max_buffer.buffer, 1,
1514 RING_BUFFER_ALL_CPUS);
1515 }
1516#endif
1517
1518 printk(KERN_CONT "PASSED\n");
1519 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001520}
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001521
1522static __init int init_trace_selftests(void)
1523{
1524 struct trace_selftests *p, *n;
1525 struct tracer *t, **last;
1526 int ret;
1527
1528 selftests_can_run = true;
1529
1530 mutex_lock(&trace_types_lock);
1531
1532 if (list_empty(&postponed_selftests))
1533 goto out;
1534
1535 pr_info("Running postponed tracer tests:\n");
1536
1537 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1538 ret = run_tracer_selftest(p->type);
1539 /* If the test fails, then warn and remove from available_tracers */
1540 if (ret < 0) {
1541 WARN(1, "tracer: %s failed selftest, disabling\n",
1542 p->type->name);
1543 last = &trace_types;
1544 for (t = trace_types; t; t = t->next) {
1545 if (t == p->type) {
1546 *last = t->next;
1547 break;
1548 }
1549 last = &t->next;
1550 }
1551 }
1552 list_del(&p->list);
1553 kfree(p);
1554 }
1555
1556 out:
1557 mutex_unlock(&trace_types_lock);
1558
1559 return 0;
1560}
Steven Rostedtb9ef0322017-05-17 11:14:35 -04001561core_initcall(init_trace_selftests);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001562#else
1563static inline int run_tracer_selftest(struct tracer *type)
1564{
1565 return 0;
1566}
1567#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001568
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001569static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1570
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001571static void __init apply_trace_boot_options(void);
1572
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001573/**
1574 * register_tracer - register a tracer with the ftrace system.
1575 * @type - the plugin for the tracer
1576 *
1577 * Register a new plugin tracer.
1578 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001579int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001580{
1581 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001582 int ret = 0;
1583
1584 if (!type->name) {
1585 pr_info("Tracer must have a name\n");
1586 return -1;
1587 }
1588
Dan Carpenter24a461d2010-07-10 12:06:44 +02001589 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08001590 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1591 return -1;
1592 }
1593
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001594 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01001595
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001596 tracing_selftest_running = true;
1597
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001598 for (t = trace_types; t; t = t->next) {
1599 if (strcmp(type->name, t->name) == 0) {
1600 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08001601 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001602 type->name);
1603 ret = -1;
1604 goto out;
1605 }
1606 }
1607
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001608 if (!type->set_flag)
1609 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08001610 if (!type->flags) {
1611 /*allocate a dummy tracer_flags*/
1612 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08001613 if (!type->flags) {
1614 ret = -ENOMEM;
1615 goto out;
1616 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08001617 type->flags->val = 0;
1618 type->flags->opts = dummy_tracer_opt;
1619 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001620 if (!type->flags->opts)
1621 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01001622
Chunyu Hud39cdd22016-03-08 21:37:01 +08001623 /* store the tracer for __set_tracer_option */
1624 type->flags->trace = type;
1625
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001626 ret = run_tracer_selftest(type);
1627 if (ret < 0)
1628 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02001629
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001630 type->next = trace_types;
1631 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001632 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02001633
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001634 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001635 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001636 mutex_unlock(&trace_types_lock);
1637
Steven Rostedtdac74942009-02-05 01:13:38 -05001638 if (ret || !default_bootup_tracer)
1639 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05001640
Li Zefanee6c2c12009-09-18 14:06:47 +08001641 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05001642 goto out_unlock;
1643
1644 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1645 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05001646 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05001647 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001648
1649 apply_trace_boot_options();
1650
Steven Rostedtdac74942009-02-05 01:13:38 -05001651 /* disable other selftests, since this will break it. */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05001652 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05001653#ifdef CONFIG_FTRACE_STARTUP_TEST
1654 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1655 type->name);
1656#endif
1657
1658 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001659 return ret;
1660}
1661
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001662void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001663{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001664 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001665
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001666 if (!buffer)
1667 return;
1668
Steven Rostedtf6339032009-09-04 12:35:16 -04001669 ring_buffer_record_disable(buffer);
1670
1671 /* Make sure all commits have finished */
1672 synchronize_sched();
Steven Rostedt68179682012-05-08 20:57:53 -04001673 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001674
1675 ring_buffer_record_enable(buffer);
1676}
1677
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001678void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001679{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001680 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001681 int cpu;
1682
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001683 if (!buffer)
1684 return;
1685
Steven Rostedt621968c2009-09-04 12:02:35 -04001686 ring_buffer_record_disable(buffer);
1687
1688 /* Make sure all commits have finished */
1689 synchronize_sched();
1690
Alexander Z Lam94571582013-08-02 18:36:16 -07001691 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001692
1693 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001694 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001695
1696 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001697}
1698
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001699/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001700void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001701{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001702 struct trace_array *tr;
1703
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001704 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001705 tracing_reset_online_cpus(&tr->trace_buffer);
1706#ifdef CONFIG_TRACER_MAX_TRACE
1707 tracing_reset_online_cpus(&tr->max_buffer);
1708#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001709 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001710}
1711
Joel Fernandesd914ba32017-06-26 19:01:55 -07001712static int *tgid_map;
1713
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001714#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001715#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001716static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001717struct saved_cmdlines_buffer {
1718 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1719 unsigned *map_cmdline_to_pid;
1720 unsigned cmdline_num;
1721 int cmdline_idx;
1722 char *saved_cmdlines;
1723};
1724static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001725
Steven Rostedt25b0b442008-05-12 21:21:00 +02001726/* temporary disable recording */
Joel Fernandesd914ba32017-06-26 19:01:55 -07001727static atomic_t trace_record_taskinfo_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001728
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001729static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001730{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001731 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1732}
1733
1734static inline void set_cmdline(int idx, const char *cmdline)
1735{
1736 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1737}
1738
1739static int allocate_cmdlines_buffer(unsigned int val,
1740 struct saved_cmdlines_buffer *s)
1741{
1742 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1743 GFP_KERNEL);
1744 if (!s->map_cmdline_to_pid)
1745 return -ENOMEM;
1746
1747 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1748 if (!s->saved_cmdlines) {
1749 kfree(s->map_cmdline_to_pid);
1750 return -ENOMEM;
1751 }
1752
1753 s->cmdline_idx = 0;
1754 s->cmdline_num = val;
1755 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1756 sizeof(s->map_pid_to_cmdline));
1757 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1758 val * sizeof(*s->map_cmdline_to_pid));
1759
1760 return 0;
1761}
1762
1763static int trace_create_savedcmd(void)
1764{
1765 int ret;
1766
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001767 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001768 if (!savedcmd)
1769 return -ENOMEM;
1770
1771 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1772 if (ret < 0) {
1773 kfree(savedcmd);
1774 savedcmd = NULL;
1775 return -ENOMEM;
1776 }
1777
1778 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001779}
1780
Carsten Emdeb5130b12009-09-13 01:43:07 +02001781int is_tracing_stopped(void)
1782{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001783 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001784}
1785
Steven Rostedt0f048702008-11-05 16:05:44 -05001786/**
1787 * tracing_start - quick start of the tracer
1788 *
1789 * If tracing is enabled but was stopped by tracing_stop,
1790 * this will start the tracer back up.
1791 */
1792void tracing_start(void)
1793{
1794 struct ring_buffer *buffer;
1795 unsigned long flags;
1796
1797 if (tracing_disabled)
1798 return;
1799
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001800 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1801 if (--global_trace.stop_count) {
1802 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001803 /* Someone screwed up their debugging */
1804 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001805 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001806 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001807 goto out;
1808 }
1809
Steven Rostedta2f80712010-03-12 19:56:00 -05001810 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001811 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001812
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001813 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001814 if (buffer)
1815 ring_buffer_record_enable(buffer);
1816
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001817#ifdef CONFIG_TRACER_MAX_TRACE
1818 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001819 if (buffer)
1820 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001821#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001822
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001823 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001824
Steven Rostedt0f048702008-11-05 16:05:44 -05001825 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001826 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1827}
1828
1829static void tracing_start_tr(struct trace_array *tr)
1830{
1831 struct ring_buffer *buffer;
1832 unsigned long flags;
1833
1834 if (tracing_disabled)
1835 return;
1836
1837 /* If global, we need to also start the max tracer */
1838 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1839 return tracing_start();
1840
1841 raw_spin_lock_irqsave(&tr->start_lock, flags);
1842
1843 if (--tr->stop_count) {
1844 if (tr->stop_count < 0) {
1845 /* Someone screwed up their debugging */
1846 WARN_ON_ONCE(1);
1847 tr->stop_count = 0;
1848 }
1849 goto out;
1850 }
1851
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001852 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001853 if (buffer)
1854 ring_buffer_record_enable(buffer);
1855
1856 out:
1857 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001858}
1859
1860/**
1861 * tracing_stop - quick stop of the tracer
1862 *
1863 * Light weight way to stop tracing. Use in conjunction with
1864 * tracing_start.
1865 */
1866void tracing_stop(void)
1867{
1868 struct ring_buffer *buffer;
1869 unsigned long flags;
1870
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001871 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1872 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001873 goto out;
1874
Steven Rostedta2f80712010-03-12 19:56:00 -05001875 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001876 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001877
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001878 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001879 if (buffer)
1880 ring_buffer_record_disable(buffer);
1881
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001882#ifdef CONFIG_TRACER_MAX_TRACE
1883 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001884 if (buffer)
1885 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001886#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001887
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001888 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001889
Steven Rostedt0f048702008-11-05 16:05:44 -05001890 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001891 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1892}
1893
1894static void tracing_stop_tr(struct trace_array *tr)
1895{
1896 struct ring_buffer *buffer;
1897 unsigned long flags;
1898
1899 /* If global, we need to also stop the max tracer */
1900 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1901 return tracing_stop();
1902
1903 raw_spin_lock_irqsave(&tr->start_lock, flags);
1904 if (tr->stop_count++)
1905 goto out;
1906
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001907 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001908 if (buffer)
1909 ring_buffer_record_disable(buffer);
1910
1911 out:
1912 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001913}
1914
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001915static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001916{
Carsten Emdea635cf02009-03-18 09:00:41 +01001917 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001918
Joel Fernandeseaf260a2017-07-06 16:00:21 -07001919 /* treat recording of idle task as a success */
1920 if (!tsk->pid)
1921 return 1;
1922
1923 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001924 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001925
1926 /*
1927 * It's not the end of the world if we don't get
1928 * the lock, but we also don't want to spin
1929 * nor do we want to disable interrupts,
1930 * so if we miss here, then better luck next time.
1931 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001932 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001933 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001934
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001935 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001936 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001937 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001938
Carsten Emdea635cf02009-03-18 09:00:41 +01001939 /*
1940 * Check whether the cmdline buffer at idx has a pid
1941 * mapped. We are going to overwrite that entry so we
1942 * need to clear the map_pid_to_cmdline. Otherwise we
1943 * would read the new comm for the old pid.
1944 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001945 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001946 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001947 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001948
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001949 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1950 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001951
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001952 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001953 }
1954
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001955 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001956
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001957 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001958
1959 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001960}
1961
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001962static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001963{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001964 unsigned map;
1965
Steven Rostedt4ca530852009-03-16 19:20:15 -04001966 if (!pid) {
1967 strcpy(comm, "<idle>");
1968 return;
1969 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001970
Steven Rostedt74bf4072010-01-25 15:11:53 -05001971 if (WARN_ON_ONCE(pid < 0)) {
1972 strcpy(comm, "<XXX>");
1973 return;
1974 }
1975
Steven Rostedt4ca530852009-03-16 19:20:15 -04001976 if (pid > PID_MAX_DEFAULT) {
1977 strcpy(comm, "<...>");
1978 return;
1979 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001980
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001981 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001982 if (map != NO_CMDLINE_MAP)
Amey Telawanee09e2862017-05-03 15:41:14 +05301983 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
Thomas Gleixner50d88752009-03-18 08:58:44 +01001984 else
1985 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001986}
1987
1988void trace_find_cmdline(int pid, char comm[])
1989{
1990 preempt_disable();
1991 arch_spin_lock(&trace_cmdline_lock);
1992
1993 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001994
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001995 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001996 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001997}
1998
Joel Fernandesd914ba32017-06-26 19:01:55 -07001999int trace_find_tgid(int pid)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002000{
Joel Fernandesd914ba32017-06-26 19:01:55 -07002001 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2002 return 0;
2003
2004 return tgid_map[pid];
2005}
2006
2007static int trace_save_tgid(struct task_struct *tsk)
2008{
Joel Fernandesbd45d342017-07-06 16:00:22 -07002009 /* treat recording of idle task as a success */
2010 if (!tsk->pid)
2011 return 1;
2012
2013 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
Joel Fernandesd914ba32017-06-26 19:01:55 -07002014 return 0;
2015
2016 tgid_map[tsk->pid] = tsk->tgid;
2017 return 1;
2018}
2019
2020static bool tracing_record_taskinfo_skip(int flags)
2021{
2022 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2023 return true;
2024 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2025 return true;
2026 if (!__this_cpu_read(trace_taskinfo_save))
2027 return true;
2028 return false;
2029}
2030
2031/**
2032 * tracing_record_taskinfo - record the task info of a task
2033 *
2034 * @task - task to record
2035 * @flags - TRACE_RECORD_CMDLINE for recording comm
2036 * - TRACE_RECORD_TGID for recording tgid
2037 */
2038void tracing_record_taskinfo(struct task_struct *task, int flags)
2039{
2040 if (tracing_record_taskinfo_skip(flags))
2041 return;
2042 if ((flags & TRACE_RECORD_CMDLINE) && !trace_save_cmdline(task))
2043 return;
2044 if ((flags & TRACE_RECORD_TGID) && !trace_save_tgid(task))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002045 return;
2046
Joel Fernandesd914ba32017-06-26 19:01:55 -07002047 __this_cpu_write(trace_taskinfo_save, false);
2048}
2049
2050/**
2051 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2052 *
2053 * @prev - previous task during sched_switch
2054 * @next - next task during sched_switch
2055 * @flags - TRACE_RECORD_CMDLINE for recording comm
2056 * TRACE_RECORD_TGID for recording tgid
2057 */
2058void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2059 struct task_struct *next, int flags)
2060{
2061 if (tracing_record_taskinfo_skip(flags))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002062 return;
2063
Joel Fernandesd914ba32017-06-26 19:01:55 -07002064 if ((flags & TRACE_RECORD_CMDLINE) &&
2065 (!trace_save_cmdline(prev) || !trace_save_cmdline(next)))
2066 return;
2067
2068 if ((flags & TRACE_RECORD_TGID) &&
2069 (!trace_save_tgid(prev) || !trace_save_tgid(next)))
2070 return;
2071
2072 __this_cpu_write(trace_taskinfo_save, false);
2073}
2074
2075/* Helpers to record a specific task information */
2076void tracing_record_cmdline(struct task_struct *task)
2077{
2078 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2079}
2080
2081void tracing_record_tgid(struct task_struct *task)
2082{
2083 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002084}
2085
Steven Rostedt (VMware)af0009f2017-03-16 11:01:06 -04002086/*
2087 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2088 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2089 * simplifies those functions and keeps them in sync.
2090 */
2091enum print_line_t trace_handle_return(struct trace_seq *s)
2092{
2093 return trace_seq_has_overflowed(s) ?
2094 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2095}
2096EXPORT_SYMBOL_GPL(trace_handle_return);
2097
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03002098void
Steven Rostedt38697052008-10-01 13:14:09 -04002099tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2100 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002101{
2102 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002103
Steven Rostedt777e2082008-09-29 23:02:42 -04002104 entry->preempt_count = pc & 0xff;
2105 entry->pid = (tsk) ? tsk->pid : 0;
2106 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04002107#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04002108 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04002109#else
2110 TRACE_FLAG_IRQS_NOSUPPORT |
2111#endif
Peter Zijlstra7e6867b2016-03-18 16:28:04 +01002112 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002113 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
Pavankumar Kondetic59f29c2016-12-09 21:50:17 +05302114 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02002115 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2116 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002117}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02002118EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002119
Steven Rostedte77405a2009-09-02 14:17:06 -04002120struct ring_buffer_event *
2121trace_buffer_lock_reserve(struct ring_buffer *buffer,
2122 int type,
2123 unsigned long len,
2124 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002125{
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002126 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002127}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002128
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002129DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2130DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2131static int trace_buffered_event_ref;
2132
2133/**
2134 * trace_buffered_event_enable - enable buffering events
2135 *
2136 * When events are being filtered, it is quicker to use a temporary
2137 * buffer to write the event data into if there's a likely chance
2138 * that it will not be committed. The discard of the ring buffer
2139 * is not as fast as committing, and is much slower than copying
2140 * a commit.
2141 *
2142 * When an event is to be filtered, allocate per cpu buffers to
2143 * write the event data into, and if the event is filtered and discarded
2144 * it is simply dropped, otherwise, the entire data is to be committed
2145 * in one shot.
2146 */
2147void trace_buffered_event_enable(void)
2148{
2149 struct ring_buffer_event *event;
2150 struct page *page;
2151 int cpu;
2152
2153 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2154
2155 if (trace_buffered_event_ref++)
2156 return;
2157
2158 for_each_tracing_cpu(cpu) {
2159 page = alloc_pages_node(cpu_to_node(cpu),
2160 GFP_KERNEL | __GFP_NORETRY, 0);
2161 if (!page)
2162 goto failed;
2163
2164 event = page_address(page);
2165 memset(event, 0, sizeof(*event));
2166
2167 per_cpu(trace_buffered_event, cpu) = event;
2168
2169 preempt_disable();
2170 if (cpu == smp_processor_id() &&
2171 this_cpu_read(trace_buffered_event) !=
2172 per_cpu(trace_buffered_event, cpu))
2173 WARN_ON_ONCE(1);
2174 preempt_enable();
2175 }
2176
2177 return;
2178 failed:
2179 trace_buffered_event_disable();
2180}
2181
2182static void enable_trace_buffered_event(void *data)
2183{
2184 /* Probably not needed, but do it anyway */
2185 smp_rmb();
2186 this_cpu_dec(trace_buffered_event_cnt);
2187}
2188
2189static void disable_trace_buffered_event(void *data)
2190{
2191 this_cpu_inc(trace_buffered_event_cnt);
2192}
2193
2194/**
2195 * trace_buffered_event_disable - disable buffering events
2196 *
2197 * When a filter is removed, it is faster to not use the buffered
2198 * events, and to commit directly into the ring buffer. Free up
2199 * the temp buffers when there are no more users. This requires
2200 * special synchronization with current events.
2201 */
2202void trace_buffered_event_disable(void)
2203{
2204 int cpu;
2205
2206 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2207
2208 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2209 return;
2210
2211 if (--trace_buffered_event_ref)
2212 return;
2213
2214 preempt_disable();
2215 /* For each CPU, set the buffer as used. */
2216 smp_call_function_many(tracing_buffer_mask,
2217 disable_trace_buffered_event, NULL, 1);
2218 preempt_enable();
2219
2220 /* Wait for all current users to finish */
2221 synchronize_sched();
2222
2223 for_each_tracing_cpu(cpu) {
2224 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2225 per_cpu(trace_buffered_event, cpu) = NULL;
2226 }
2227 /*
2228 * Make sure trace_buffered_event is NULL before clearing
2229 * trace_buffered_event_cnt.
2230 */
2231 smp_wmb();
2232
2233 preempt_disable();
2234 /* Do the work on each cpu */
2235 smp_call_function_many(tracing_buffer_mask,
2236 enable_trace_buffered_event, NULL, 1);
2237 preempt_enable();
2238}
2239
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002240static struct ring_buffer *temp_buffer;
2241
Steven Rostedtef5580d2009-02-27 19:38:04 -05002242struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04002243trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002244 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002245 int type, unsigned long len,
2246 unsigned long flags, int pc)
2247{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002248 struct ring_buffer_event *entry;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002249 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002250
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002251 *current_rb = trace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002252
2253 if ((trace_file->flags &
2254 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2255 (entry = this_cpu_read(trace_buffered_event))) {
2256 /* Try to use the per cpu buffer first */
2257 val = this_cpu_inc_return(trace_buffered_event_cnt);
2258 if (val == 1) {
2259 trace_event_setup(entry, type, flags, pc);
2260 entry->array[0] = len;
2261 return entry;
2262 }
2263 this_cpu_dec(trace_buffered_event_cnt);
2264 }
2265
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002266 entry = __trace_buffer_lock_reserve(*current_rb,
2267 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002268 /*
2269 * If tracing is off, but we have triggers enabled
2270 * we still need to look at the event data. Use the temp_buffer
2271 * to store the trace event for the tigger to use. It's recusive
2272 * safe and will not be recorded anywhere.
2273 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002274 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002275 *current_rb = temp_buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002276 entry = __trace_buffer_lock_reserve(*current_rb,
2277 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002278 }
2279 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002280}
2281EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2282
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002283static DEFINE_SPINLOCK(tracepoint_iter_lock);
2284static DEFINE_MUTEX(tracepoint_printk_mutex);
2285
2286static void output_printk(struct trace_event_buffer *fbuffer)
2287{
2288 struct trace_event_call *event_call;
2289 struct trace_event *event;
2290 unsigned long flags;
2291 struct trace_iterator *iter = tracepoint_print_iter;
2292
2293 /* We should never get here if iter is NULL */
2294 if (WARN_ON_ONCE(!iter))
2295 return;
2296
2297 event_call = fbuffer->trace_file->event_call;
2298 if (!event_call || !event_call->event.funcs ||
2299 !event_call->event.funcs->trace)
2300 return;
2301
2302 event = &fbuffer->trace_file->event_call->event;
2303
2304 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2305 trace_seq_init(&iter->seq);
2306 iter->ent = fbuffer->entry;
2307 event_call->event.funcs->trace(iter, 0, event);
2308 trace_seq_putc(&iter->seq, 0);
2309 printk("%s", iter->seq.buffer);
2310
2311 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2312}
2313
2314int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2315 void __user *buffer, size_t *lenp,
2316 loff_t *ppos)
2317{
2318 int save_tracepoint_printk;
2319 int ret;
2320
2321 mutex_lock(&tracepoint_printk_mutex);
2322 save_tracepoint_printk = tracepoint_printk;
2323
2324 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2325
2326 /*
2327 * This will force exiting early, as tracepoint_printk
2328 * is always zero when tracepoint_printk_iter is not allocated
2329 */
2330 if (!tracepoint_print_iter)
2331 tracepoint_printk = 0;
2332
2333 if (save_tracepoint_printk == tracepoint_printk)
2334 goto out;
2335
2336 if (tracepoint_printk)
2337 static_key_enable(&tracepoint_printk_key.key);
2338 else
2339 static_key_disable(&tracepoint_printk_key.key);
2340
2341 out:
2342 mutex_unlock(&tracepoint_printk_mutex);
2343
2344 return ret;
2345}
2346
2347void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2348{
2349 if (static_key_false(&tracepoint_printk_key.key))
2350 output_printk(fbuffer);
2351
2352 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2353 fbuffer->event, fbuffer->entry,
2354 fbuffer->flags, fbuffer->pc);
2355}
2356EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2357
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002358void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2359 struct ring_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002360 struct ring_buffer_event *event,
2361 unsigned long flags, int pc,
2362 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002363{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002364 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002365
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002366 /*
2367 * If regs is not set, then skip the following callers:
2368 * trace_buffer_unlock_commit_regs
2369 * event_trigger_unlock_commit
2370 * trace_event_buffer_commit
2371 * trace_event_raw_event_sched_switch
2372 * Note, we can still get here via blktrace, wakeup tracer
2373 * and mmiotrace, but that's ok if they lose a function or
2374 * two. They are that meaningful.
2375 */
2376 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002377 ftrace_trace_userstack(buffer, flags, pc);
2378}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002379
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002380/*
2381 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2382 */
2383void
2384trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2385 struct ring_buffer_event *event)
2386{
2387 __buffer_unlock_commit(buffer, event);
2388}
2389
Chunyan Zhang478409d2016-11-21 15:57:18 +08002390static void
2391trace_process_export(struct trace_export *export,
2392 struct ring_buffer_event *event)
2393{
2394 struct trace_entry *entry;
2395 unsigned int size = 0;
2396
2397 entry = ring_buffer_event_data(event);
2398 size = ring_buffer_event_length(event);
2399 export->write(entry, size);
2400}
2401
2402static DEFINE_MUTEX(ftrace_export_lock);
2403
2404static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2405
2406static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2407
2408static inline void ftrace_exports_enable(void)
2409{
2410 static_branch_enable(&ftrace_exports_enabled);
2411}
2412
2413static inline void ftrace_exports_disable(void)
2414{
2415 static_branch_disable(&ftrace_exports_enabled);
2416}
2417
2418void ftrace_exports(struct ring_buffer_event *event)
2419{
2420 struct trace_export *export;
2421
2422 preempt_disable_notrace();
2423
2424 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2425 while (export) {
2426 trace_process_export(export, event);
2427 export = rcu_dereference_raw_notrace(export->next);
2428 }
2429
2430 preempt_enable_notrace();
2431}
2432
2433static inline void
2434add_trace_export(struct trace_export **list, struct trace_export *export)
2435{
2436 rcu_assign_pointer(export->next, *list);
2437 /*
2438 * We are entering export into the list but another
2439 * CPU might be walking that list. We need to make sure
2440 * the export->next pointer is valid before another CPU sees
2441 * the export pointer included into the list.
2442 */
2443 rcu_assign_pointer(*list, export);
2444}
2445
2446static inline int
2447rm_trace_export(struct trace_export **list, struct trace_export *export)
2448{
2449 struct trace_export **p;
2450
2451 for (p = list; *p != NULL; p = &(*p)->next)
2452 if (*p == export)
2453 break;
2454
2455 if (*p != export)
2456 return -1;
2457
2458 rcu_assign_pointer(*p, (*p)->next);
2459
2460 return 0;
2461}
2462
2463static inline void
2464add_ftrace_export(struct trace_export **list, struct trace_export *export)
2465{
2466 if (*list == NULL)
2467 ftrace_exports_enable();
2468
2469 add_trace_export(list, export);
2470}
2471
2472static inline int
2473rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2474{
2475 int ret;
2476
2477 ret = rm_trace_export(list, export);
2478 if (*list == NULL)
2479 ftrace_exports_disable();
2480
2481 return ret;
2482}
2483
2484int register_ftrace_export(struct trace_export *export)
2485{
2486 if (WARN_ON_ONCE(!export->write))
2487 return -1;
2488
2489 mutex_lock(&ftrace_export_lock);
2490
2491 add_ftrace_export(&ftrace_exports_list, export);
2492
2493 mutex_unlock(&ftrace_export_lock);
2494
2495 return 0;
2496}
2497EXPORT_SYMBOL_GPL(register_ftrace_export);
2498
2499int unregister_ftrace_export(struct trace_export *export)
2500{
2501 int ret;
2502
2503 mutex_lock(&ftrace_export_lock);
2504
2505 ret = rm_ftrace_export(&ftrace_exports_list, export);
2506
2507 mutex_unlock(&ftrace_export_lock);
2508
2509 return ret;
2510}
2511EXPORT_SYMBOL_GPL(unregister_ftrace_export);
2512
Ingo Molnare309b412008-05-12 21:20:51 +02002513void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002514trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04002515 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2516 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002517{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002518 struct trace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002519 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002520 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002521 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002522
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002523 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2524 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002525 if (!event)
2526 return;
2527 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002528 entry->ip = ip;
2529 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002530
Chunyan Zhang478409d2016-11-21 15:57:18 +08002531 if (!call_filter_check_discard(call, entry, buffer, event)) {
2532 if (static_branch_unlikely(&ftrace_exports_enabled))
2533 ftrace_exports(event);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002534 __buffer_unlock_commit(buffer, event);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002535 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002536}
2537
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002538#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002539
2540#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2541struct ftrace_stack {
2542 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2543};
2544
2545static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2546static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2547
Steven Rostedte77405a2009-09-02 14:17:06 -04002548static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05002549 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002550 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02002551{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002552 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002553 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002554 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02002555 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002556 int use_stack;
2557 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02002558
2559 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02002560 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02002561
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002562 /*
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002563 * Add two, for this function and the call to save_stack_trace()
2564 * If regs is set, then these functions will not be in the way.
2565 */
2566 if (!regs)
2567 trace.skip += 2;
2568
2569 /*
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002570 * Since events can happen in NMIs there's no safe way to
2571 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2572 * or NMI comes in, it will just have to use the default
2573 * FTRACE_STACK_SIZE.
2574 */
2575 preempt_disable_notrace();
2576
Shan Wei82146522012-11-19 13:21:01 +08002577 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002578 /*
2579 * We don't need any atomic variables, just a barrier.
2580 * If an interrupt comes in, we don't care, because it would
2581 * have exited and put the counter back to what we want.
2582 * We just need a barrier to keep gcc from moving things
2583 * around.
2584 */
2585 barrier();
2586 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05002587 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002588 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2589
2590 if (regs)
2591 save_stack_trace_regs(regs, &trace);
2592 else
2593 save_stack_trace(&trace);
2594
2595 if (trace.nr_entries > size)
2596 size = trace.nr_entries;
2597 } else
2598 /* From now on, use_stack is a boolean */
2599 use_stack = 0;
2600
2601 size *= sizeof(unsigned long);
2602
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002603 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2604 sizeof(*entry) + size, flags, pc);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002605 if (!event)
2606 goto out;
2607 entry = ring_buffer_event_data(event);
2608
2609 memset(&entry->caller, 0, size);
2610
2611 if (use_stack)
2612 memcpy(&entry->caller, trace.entries,
2613 trace.nr_entries * sizeof(unsigned long));
2614 else {
2615 trace.max_entries = FTRACE_STACK_ENTRIES;
2616 trace.entries = entry->caller;
2617 if (regs)
2618 save_stack_trace_regs(regs, &trace);
2619 else
2620 save_stack_trace(&trace);
2621 }
2622
2623 entry->size = trace.nr_entries;
2624
Tom Zanussif306cc82013-10-24 08:34:17 -05002625 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002626 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002627
2628 out:
2629 /* Again, don't let gcc optimize things here */
2630 barrier();
Shan Wei82146522012-11-19 13:21:01 +08002631 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002632 preempt_enable_notrace();
2633
Ingo Molnarf0a920d2008-05-12 21:20:47 +02002634}
2635
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002636static inline void ftrace_trace_stack(struct trace_array *tr,
2637 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002638 unsigned long flags,
2639 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05002640{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002641 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05002642 return;
2643
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002644 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05002645}
2646
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002647void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2648 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04002649{
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04002650 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2651
2652 if (rcu_is_watching()) {
2653 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2654 return;
2655 }
2656
2657 /*
2658 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2659 * but if the above rcu_is_watching() failed, then the NMI
2660 * triggered someplace critical, and rcu_irq_enter() should
2661 * not be called from NMI.
2662 */
2663 if (unlikely(in_nmi()))
2664 return;
2665
2666 /*
2667 * It is possible that a function is being traced in a
2668 * location that RCU is not watching. A call to
2669 * rcu_irq_enter() will make sure that it is, but there's
2670 * a few internal rcu functions that could be traced
2671 * where that wont work either. In those cases, we just
2672 * do nothing.
2673 */
2674 if (unlikely(rcu_irq_enter_disabled()))
2675 return;
2676
2677 rcu_irq_enter_irqson();
2678 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2679 rcu_irq_exit_irqson();
Steven Rostedt38697052008-10-01 13:14:09 -04002680}
2681
Steven Rostedt03889382009-12-11 09:48:22 -05002682/**
2683 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002684 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05002685 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002686void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05002687{
2688 unsigned long flags;
2689
2690 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05002691 return;
Steven Rostedt03889382009-12-11 09:48:22 -05002692
2693 local_save_flags(flags);
2694
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002695 /*
2696 * Skip 3 more, seems to get us at the caller of
2697 * this function.
2698 */
2699 skip += 3;
2700 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2701 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05002702}
2703
Steven Rostedt91e86e52010-11-10 12:56:12 +01002704static DEFINE_PER_CPU(int, user_stack_count);
2705
Steven Rostedte77405a2009-09-02 14:17:06 -04002706void
2707ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02002708{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002709 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02002710 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02002711 struct userstack_entry *entry;
2712 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02002713
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002714 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02002715 return;
2716
Steven Rostedtb6345872010-03-12 20:03:30 -05002717 /*
2718 * NMIs can not handle page faults, even with fix ups.
2719 * The save user stack can (and often does) fault.
2720 */
2721 if (unlikely(in_nmi()))
2722 return;
2723
Steven Rostedt91e86e52010-11-10 12:56:12 +01002724 /*
2725 * prevent recursion, since the user stack tracing may
2726 * trigger other kernel events.
2727 */
2728 preempt_disable();
2729 if (__this_cpu_read(user_stack_count))
2730 goto out;
2731
2732 __this_cpu_inc(user_stack_count);
2733
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002734 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2735 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02002736 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08002737 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02002738 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02002739
Steven Rostedt48659d32009-09-11 11:36:23 -04002740 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02002741 memset(&entry->caller, 0, sizeof(entry->caller));
2742
2743 trace.nr_entries = 0;
2744 trace.max_entries = FTRACE_STACK_ENTRIES;
2745 trace.skip = 0;
2746 trace.entries = entry->caller;
2747
2748 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05002749 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002750 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002751
Li Zefan1dbd1952010-12-09 15:47:56 +08002752 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01002753 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002754 out:
2755 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02002756}
2757
Hannes Eder4fd27352009-02-10 19:44:12 +01002758#ifdef UNUSED
2759static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02002760{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002761 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02002762}
Hannes Eder4fd27352009-02-10 19:44:12 +01002763#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02002764
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002765#endif /* CONFIG_STACKTRACE */
2766
Steven Rostedt07d777f2011-09-22 14:01:55 -04002767/* created for use with alloc_percpu */
2768struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002769 int nesting;
2770 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04002771};
2772
2773static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002774
2775/*
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002776 * Thise allows for lockless recording. If we're nested too deeply, then
2777 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04002778 */
2779static char *get_trace_buf(void)
2780{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002781 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002782
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002783 if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002784 return NULL;
2785
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002786 return &buffer->buffer[buffer->nesting++][0];
2787}
2788
2789static void put_trace_buf(void)
2790{
2791 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002792}
2793
2794static int alloc_percpu_trace_buffer(void)
2795{
2796 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002797
2798 buffers = alloc_percpu(struct trace_buffer_struct);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002799 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2800 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002801
2802 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002803 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002804}
2805
Steven Rostedt81698832012-10-11 10:15:05 -04002806static int buffers_allocated;
2807
Steven Rostedt07d777f2011-09-22 14:01:55 -04002808void trace_printk_init_buffers(void)
2809{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002810 if (buffers_allocated)
2811 return;
2812
2813 if (alloc_percpu_trace_buffer())
2814 return;
2815
Steven Rostedt2184db42014-05-28 13:14:40 -04002816 /* trace_printk() is for debug use only. Don't use it in production. */
2817
Joe Perchesa395d6a2016-03-22 14:28:09 -07002818 pr_warn("\n");
2819 pr_warn("**********************************************************\n");
2820 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2821 pr_warn("** **\n");
2822 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2823 pr_warn("** **\n");
2824 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2825 pr_warn("** unsafe for production use. **\n");
2826 pr_warn("** **\n");
2827 pr_warn("** If you see this message and you are not debugging **\n");
2828 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2829 pr_warn("** **\n");
2830 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2831 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002832
Steven Rostedtb382ede62012-10-10 21:44:34 -04002833 /* Expand the buffers to set size */
2834 tracing_update_buffers();
2835
Steven Rostedt07d777f2011-09-22 14:01:55 -04002836 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002837
2838 /*
2839 * trace_printk_init_buffers() can be called by modules.
2840 * If that happens, then we need to start cmdline recording
2841 * directly here. If the global_trace.buffer is already
2842 * allocated here, then this was called by module code.
2843 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002844 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002845 tracing_start_cmdline_record();
2846}
2847
2848void trace_printk_start_comm(void)
2849{
2850 /* Start tracing comms if trace printk is set */
2851 if (!buffers_allocated)
2852 return;
2853 tracing_start_cmdline_record();
2854}
2855
2856static void trace_printk_start_stop_comm(int enabled)
2857{
2858 if (!buffers_allocated)
2859 return;
2860
2861 if (enabled)
2862 tracing_start_cmdline_record();
2863 else
2864 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002865}
2866
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002867/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002868 * trace_vbprintk - write binary msg to tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002869 *
2870 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002871int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002872{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002873 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002874 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002875 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002876 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002877 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002878 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002879 char *tbuffer;
2880 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002881
2882 if (unlikely(tracing_selftest_running || tracing_disabled))
2883 return 0;
2884
2885 /* Don't pollute graph traces with trace_vprintk internals */
2886 pause_graph_tracing();
2887
2888 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002889 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002890
Steven Rostedt07d777f2011-09-22 14:01:55 -04002891 tbuffer = get_trace_buf();
2892 if (!tbuffer) {
2893 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002894 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002895 }
2896
2897 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2898
2899 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002900 goto out;
2901
Steven Rostedt07d777f2011-09-22 14:01:55 -04002902 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002903 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002904 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002905 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2906 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002907 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002908 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002909 entry = ring_buffer_event_data(event);
2910 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002911 entry->fmt = fmt;
2912
Steven Rostedt07d777f2011-09-22 14:01:55 -04002913 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002914 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002915 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002916 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05002917 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002918
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002919out:
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002920 put_trace_buf();
2921
2922out_nobuffer:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002923 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002924 unpause_graph_tracing();
2925
2926 return len;
2927}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002928EXPORT_SYMBOL_GPL(trace_vbprintk);
2929
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002930static int
2931__trace_array_vprintk(struct ring_buffer *buffer,
2932 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002933{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002934 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002935 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002936 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002937 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002938 unsigned long flags;
2939 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002940
2941 if (tracing_disabled || tracing_selftest_running)
2942 return 0;
2943
Steven Rostedt07d777f2011-09-22 14:01:55 -04002944 /* Don't pollute graph traces with trace_vprintk internals */
2945 pause_graph_tracing();
2946
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002947 pc = preempt_count();
2948 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002949
Steven Rostedt07d777f2011-09-22 14:01:55 -04002950
2951 tbuffer = get_trace_buf();
2952 if (!tbuffer) {
2953 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002954 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002955 }
2956
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002957 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002958
Steven Rostedt07d777f2011-09-22 14:01:55 -04002959 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002960 size = sizeof(*entry) + len + 1;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002961 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2962 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002963 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002964 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002965 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002966 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002967
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002968 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05002969 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002970 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002971 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05002972 }
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002973
2974out:
2975 put_trace_buf();
2976
2977out_nobuffer:
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002978 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002979 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002980
2981 return len;
2982}
Steven Rostedt659372d2009-09-03 19:11:07 -04002983
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002984int trace_array_vprintk(struct trace_array *tr,
2985 unsigned long ip, const char *fmt, va_list args)
2986{
2987 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2988}
2989
2990int trace_array_printk(struct trace_array *tr,
2991 unsigned long ip, const char *fmt, ...)
2992{
2993 int ret;
2994 va_list ap;
2995
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002996 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002997 return 0;
2998
2999 va_start(ap, fmt);
3000 ret = trace_array_vprintk(tr, ip, fmt, ap);
3001 va_end(ap);
3002 return ret;
3003}
3004
3005int trace_array_printk_buf(struct ring_buffer *buffer,
3006 unsigned long ip, const char *fmt, ...)
3007{
3008 int ret;
3009 va_list ap;
3010
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003011 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003012 return 0;
3013
3014 va_start(ap, fmt);
3015 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3016 va_end(ap);
3017 return ret;
3018}
3019
Steven Rostedt659372d2009-09-03 19:11:07 -04003020int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3021{
Steven Rostedta813a152009-10-09 01:41:35 -04003022 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04003023}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003024EXPORT_SYMBOL_GPL(trace_vprintk);
3025
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003026static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04003027{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003028 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3029
Steven Rostedt5a90f572008-09-03 17:42:51 -04003030 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003031 if (buf_iter)
3032 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04003033}
3034
Ingo Molnare309b412008-05-12 21:20:51 +02003035static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003036peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3037 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003038{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003039 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003040 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003041
Steven Rostedtd7690412008-10-01 00:29:53 -04003042 if (buf_iter)
3043 event = ring_buffer_iter_peek(buf_iter, ts);
3044 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003045 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003046 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04003047
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003048 if (event) {
3049 iter->ent_size = ring_buffer_event_length(event);
3050 return ring_buffer_event_data(event);
3051 }
3052 iter->ent_size = 0;
3053 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003054}
Steven Rostedtd7690412008-10-01 00:29:53 -04003055
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003056static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003057__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3058 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003059{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003060 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003061 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08003062 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003063 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003064 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003065 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003066 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003067 int cpu;
3068
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003069 /*
3070 * If we are in a per_cpu trace file, don't bother by iterating over
3071 * all cpu and peek directly.
3072 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003073 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003074 if (ring_buffer_empty_cpu(buffer, cpu_file))
3075 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003076 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003077 if (ent_cpu)
3078 *ent_cpu = cpu_file;
3079
3080 return ent;
3081 }
3082
Steven Rostedtab464282008-05-12 21:21:00 +02003083 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003084
3085 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003086 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003087
Steven Rostedtbc21b472010-03-31 19:49:26 -04003088 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003089
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02003090 /*
3091 * Pick the entry with the smallest timestamp:
3092 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003093 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003094 next = ent;
3095 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003096 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003097 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003098 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003099 }
3100 }
3101
Steven Rostedt12b5da32012-03-27 10:43:28 -04003102 iter->ent_size = next_size;
3103
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003104 if (ent_cpu)
3105 *ent_cpu = next_cpu;
3106
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003107 if (ent_ts)
3108 *ent_ts = next_ts;
3109
Steven Rostedtbc21b472010-03-31 19:49:26 -04003110 if (missing_events)
3111 *missing_events = next_lost;
3112
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003113 return next;
3114}
3115
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003116/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003117struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3118 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003119{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003120 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003121}
Ingo Molnar8c523a92008-05-12 21:20:46 +02003122
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003123/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05003124void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003125{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003126 iter->ent = __find_next_entry(iter, &iter->cpu,
3127 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003128
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003129 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003130 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003131
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003132 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003133}
3134
Ingo Molnare309b412008-05-12 21:20:51 +02003135static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003136{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003137 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003138 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003139}
3140
Ingo Molnare309b412008-05-12 21:20:51 +02003141static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003142{
3143 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003144 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003145 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003146
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003147 WARN_ON_ONCE(iter->leftover);
3148
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003149 (*pos)++;
3150
3151 /* can't go backwards */
3152 if (iter->idx > i)
3153 return NULL;
3154
3155 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05003156 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003157 else
3158 ent = iter;
3159
3160 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05003161 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003162
3163 iter->pos = *pos;
3164
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003165 return ent;
3166}
3167
Jason Wessel955b61e2010-08-05 09:22:23 -05003168void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003169{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003170 struct ring_buffer_event *event;
3171 struct ring_buffer_iter *buf_iter;
3172 unsigned long entries = 0;
3173 u64 ts;
3174
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003175 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003176
Steven Rostedt6d158a82012-06-27 20:46:14 -04003177 buf_iter = trace_buffer_iter(iter, cpu);
3178 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003179 return;
3180
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003181 ring_buffer_iter_reset(buf_iter);
3182
3183 /*
3184 * We could have the case with the max latency tracers
3185 * that a reset never took place on a cpu. This is evident
3186 * by the timestamp being before the start of the buffer.
3187 */
3188 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003189 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003190 break;
3191 entries++;
3192 ring_buffer_read(buf_iter, NULL);
3193 }
3194
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003195 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003196}
3197
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003198/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003199 * The current tracer is copied to avoid a global locking
3200 * all around.
3201 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003202static void *s_start(struct seq_file *m, loff_t *pos)
3203{
3204 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003205 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003206 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003207 void *p = NULL;
3208 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003209 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003210
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09003211 /*
3212 * copy the tracer to avoid using a global lock all around.
3213 * iter->trace is a copy of current_trace, the pointer to the
3214 * name may be used instead of a strcmp(), as iter->trace->name
3215 * will point to the same string as current_trace->name.
3216 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003217 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003218 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3219 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003220 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003221
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003222#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003223 if (iter->snapshot && iter->trace->use_max_tr)
3224 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003225#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003226
3227 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003228 atomic_inc(&trace_record_taskinfo_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003229
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003230 if (*pos != iter->pos) {
3231 iter->ent = NULL;
3232 iter->cpu = 0;
3233 iter->idx = -1;
3234
Steven Rostedtae3b5092013-01-23 15:22:59 -05003235 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003236 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003237 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003238 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003239 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003240
Lai Jiangshanac91d852010-03-02 17:54:50 +08003241 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003242 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3243 ;
3244
3245 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003246 /*
3247 * If we overflowed the seq_file before, then we want
3248 * to just reuse the trace_seq buffer again.
3249 */
3250 if (iter->leftover)
3251 p = iter;
3252 else {
3253 l = *pos - 1;
3254 p = s_next(m, p, &l);
3255 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003256 }
3257
Lai Jiangshan4f535962009-05-18 19:35:34 +08003258 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003259 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003260 return p;
3261}
3262
3263static void s_stop(struct seq_file *m, void *p)
3264{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003265 struct trace_iterator *iter = m->private;
3266
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003267#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003268 if (iter->snapshot && iter->trace->use_max_tr)
3269 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003270#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003271
3272 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003273 atomic_dec(&trace_record_taskinfo_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003274
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003275 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08003276 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003277}
3278
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003279static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003280get_total_entries(struct trace_buffer *buf,
3281 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003282{
3283 unsigned long count;
3284 int cpu;
3285
3286 *total = 0;
3287 *entries = 0;
3288
3289 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003290 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003291 /*
3292 * If this buffer has skipped entries, then we hold all
3293 * entries for the trace and we need to ignore the
3294 * ones before the time stamp.
3295 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003296 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3297 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003298 /* total is the same as the entries */
3299 *total += count;
3300 } else
3301 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003302 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003303 *entries += count;
3304 }
3305}
3306
Ingo Molnare309b412008-05-12 21:20:51 +02003307static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003308{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003309 seq_puts(m, "# _------=> CPU# \n"
3310 "# / _-----=> irqs-off \n"
3311 "# | / _----=> need-resched \n"
3312 "# || / _---=> hardirq/softirq \n"
3313 "# ||| / _--=> preempt-depth \n"
3314 "# |||| / delay \n"
3315 "# cmd pid ||||| time | caller \n"
3316 "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003317}
3318
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003319static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003320{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003321 unsigned long total;
3322 unsigned long entries;
3323
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003324 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003325 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3326 entries, total, num_online_cpus());
3327 seq_puts(m, "#\n");
3328}
3329
Joel Fernandes441dae82017-06-25 22:38:43 -07003330static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3331 unsigned int flags)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003332{
Joel Fernandes441dae82017-06-25 22:38:43 -07003333 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3334
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003335 print_event_info(buf, m);
Joel Fernandes441dae82017-06-25 22:38:43 -07003336
3337 seq_printf(m, "# TASK-PID CPU# %s TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3338 seq_printf(m, "# | | | %s | |\n", tgid ? " | " : "");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003339}
3340
Joel Fernandes441dae82017-06-25 22:38:43 -07003341static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3342 unsigned int flags)
Steven Rostedt77271ce2011-11-17 09:34:33 -05003343{
Joel Fernandes441dae82017-06-25 22:38:43 -07003344 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3345
3346 seq_printf(m, "# %s _-----=> irqs-off\n", tgid ? " " : "");
3347 seq_printf(m, "# %s / _----=> need-resched\n", tgid ? " " : "");
3348 seq_printf(m, "# %s| / _---=> hardirq/softirq\n", tgid ? " " : "");
3349 seq_printf(m, "# %s|| / _--=> preempt-depth\n", tgid ? " " : "");
3350 seq_printf(m, "# %s||| / delay\n", tgid ? " " : "");
3351 seq_printf(m, "# TASK-PID CPU#%s|||| TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
3352 seq_printf(m, "# | | | %s|||| | |\n", tgid ? " | " : "");
Steven Rostedt77271ce2011-11-17 09:34:33 -05003353}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003354
Jiri Olsa62b915f2010-04-02 19:01:22 +02003355void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003356print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3357{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003358 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003359 struct trace_buffer *buf = iter->trace_buffer;
3360 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003361 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003362 unsigned long entries;
3363 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003364 const char *name = "preemption";
3365
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05003366 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003367
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003368 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003369
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003370 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003371 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003372 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003373 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003374 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003375 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02003376 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003377 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02003378 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003379 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003380#if defined(CONFIG_PREEMPT_NONE)
3381 "server",
3382#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3383 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04003384#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003385 "preempt",
3386#else
3387 "unknown",
3388#endif
3389 /* These are reserved for later use */
3390 0, 0, 0, 0);
3391#ifdef CONFIG_SMP
3392 seq_printf(m, " #P:%d)\n", num_online_cpus());
3393#else
3394 seq_puts(m, ")\n");
3395#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003396 seq_puts(m, "# -----------------\n");
3397 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003398 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07003399 data->comm, data->pid,
3400 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003401 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003402 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003403
3404 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003405 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003406 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3407 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003408 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003409 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3410 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04003411 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003412 }
3413
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003414 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003415}
3416
Steven Rostedta3097202008-11-07 22:36:02 -05003417static void test_cpu_buff_start(struct trace_iterator *iter)
3418{
3419 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003420 struct trace_array *tr = iter->tr;
Steven Rostedta3097202008-11-07 22:36:02 -05003421
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003422 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003423 return;
3424
3425 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3426 return;
3427
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003428 if (cpumask_available(iter->started) &&
3429 cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05003430 return;
3431
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003432 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003433 return;
3434
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003435 if (cpumask_available(iter->started))
Sasha Levin919cd972015-09-04 12:45:56 -04003436 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003437
3438 /* Don't print started cpu buffer for the first entry of the trace */
3439 if (iter->idx > 1)
3440 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3441 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05003442}
3443
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003444static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003445{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003446 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02003447 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003448 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003449 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003450 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003451
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003452 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003453
Steven Rostedta3097202008-11-07 22:36:02 -05003454 test_cpu_buff_start(iter);
3455
Steven Rostedtf633cef2008-12-23 23:24:13 -05003456 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003457
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003458 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003459 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3460 trace_print_lat_context(iter);
3461 else
3462 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003463 }
3464
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003465 if (trace_seq_has_overflowed(s))
3466 return TRACE_TYPE_PARTIAL_LINE;
3467
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003468 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003469 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003470
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003471 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003472
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003473 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003474}
3475
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003476static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003477{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003478 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003479 struct trace_seq *s = &iter->seq;
3480 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003481 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003482
3483 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003484
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003485 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003486 trace_seq_printf(s, "%d %d %llu ",
3487 entry->pid, iter->cpu, iter->ts);
3488
3489 if (trace_seq_has_overflowed(s))
3490 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003491
Steven Rostedtf633cef2008-12-23 23:24:13 -05003492 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003493 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003494 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003495
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003496 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003497
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003498 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003499}
3500
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003501static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003502{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003503 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003504 struct trace_seq *s = &iter->seq;
3505 unsigned char newline = '\n';
3506 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003507 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003508
3509 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003510
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003511 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003512 SEQ_PUT_HEX_FIELD(s, entry->pid);
3513 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3514 SEQ_PUT_HEX_FIELD(s, iter->ts);
3515 if (trace_seq_has_overflowed(s))
3516 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003517 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003518
Steven Rostedtf633cef2008-12-23 23:24:13 -05003519 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003520 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04003521 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003522 if (ret != TRACE_TYPE_HANDLED)
3523 return ret;
3524 }
Steven Rostedt7104f302008-10-01 10:52:51 -04003525
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003526 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003527
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003528 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003529}
3530
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003531static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003532{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003533 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003534 struct trace_seq *s = &iter->seq;
3535 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003536 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003537
3538 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003539
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003540 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003541 SEQ_PUT_FIELD(s, entry->pid);
3542 SEQ_PUT_FIELD(s, iter->cpu);
3543 SEQ_PUT_FIELD(s, iter->ts);
3544 if (trace_seq_has_overflowed(s))
3545 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003546 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003547
Steven Rostedtf633cef2008-12-23 23:24:13 -05003548 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04003549 return event ? event->funcs->binary(iter, 0, event) :
3550 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003551}
3552
Jiri Olsa62b915f2010-04-02 19:01:22 +02003553int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003554{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003555 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003556 int cpu;
3557
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003558 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003559 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003560 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003561 buf_iter = trace_buffer_iter(iter, cpu);
3562 if (buf_iter) {
3563 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003564 return 0;
3565 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003566 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003567 return 0;
3568 }
3569 return 1;
3570 }
3571
Steven Rostedtab464282008-05-12 21:21:00 +02003572 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04003573 buf_iter = trace_buffer_iter(iter, cpu);
3574 if (buf_iter) {
3575 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04003576 return 0;
3577 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003578 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04003579 return 0;
3580 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003581 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003582
Frederic Weisbecker797d3712008-09-30 18:13:45 +02003583 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003584}
3585
Lai Jiangshan4f535962009-05-18 19:35:34 +08003586/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05003587enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003588{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003589 struct trace_array *tr = iter->tr;
3590 unsigned long trace_flags = tr->trace_flags;
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003591 enum print_line_t ret;
3592
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003593 if (iter->lost_events) {
3594 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3595 iter->cpu, iter->lost_events);
3596 if (trace_seq_has_overflowed(&iter->seq))
3597 return TRACE_TYPE_PARTIAL_LINE;
3598 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04003599
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003600 if (iter->trace && iter->trace->print_line) {
3601 ret = iter->trace->print_line(iter);
3602 if (ret != TRACE_TYPE_UNHANDLED)
3603 return ret;
3604 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02003605
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05003606 if (iter->ent->type == TRACE_BPUTS &&
3607 trace_flags & TRACE_ITER_PRINTK &&
3608 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3609 return trace_print_bputs_msg_only(iter);
3610
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003611 if (iter->ent->type == TRACE_BPRINT &&
3612 trace_flags & TRACE_ITER_PRINTK &&
3613 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003614 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003615
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003616 if (iter->ent->type == TRACE_PRINT &&
3617 trace_flags & TRACE_ITER_PRINTK &&
3618 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003619 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003620
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003621 if (trace_flags & TRACE_ITER_BIN)
3622 return print_bin_fmt(iter);
3623
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003624 if (trace_flags & TRACE_ITER_HEX)
3625 return print_hex_fmt(iter);
3626
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003627 if (trace_flags & TRACE_ITER_RAW)
3628 return print_raw_fmt(iter);
3629
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003630 return print_trace_fmt(iter);
3631}
3632
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003633void trace_latency_header(struct seq_file *m)
3634{
3635 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003636 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003637
3638 /* print nothing if the buffers are empty */
3639 if (trace_empty(iter))
3640 return;
3641
3642 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3643 print_trace_header(m, iter);
3644
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003645 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003646 print_lat_help_header(m);
3647}
3648
Jiri Olsa62b915f2010-04-02 19:01:22 +02003649void trace_default_header(struct seq_file *m)
3650{
3651 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003652 struct trace_array *tr = iter->tr;
3653 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02003654
Jiri Olsaf56e7f82011-06-03 16:58:49 +02003655 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3656 return;
3657
Jiri Olsa62b915f2010-04-02 19:01:22 +02003658 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3659 /* print nothing if the buffers are empty */
3660 if (trace_empty(iter))
3661 return;
3662 print_trace_header(m, iter);
3663 if (!(trace_flags & TRACE_ITER_VERBOSE))
3664 print_lat_help_header(m);
3665 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05003666 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3667 if (trace_flags & TRACE_ITER_IRQ_INFO)
Joel Fernandes441dae82017-06-25 22:38:43 -07003668 print_func_help_header_irq(iter->trace_buffer,
3669 m, trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003670 else
Joel Fernandes441dae82017-06-25 22:38:43 -07003671 print_func_help_header(iter->trace_buffer, m,
3672 trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003673 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02003674 }
3675}
3676
Steven Rostedte0a413f2011-09-29 21:26:16 -04003677static void test_ftrace_alive(struct seq_file *m)
3678{
3679 if (!ftrace_is_dead())
3680 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003681 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3682 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04003683}
3684
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003685#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003686static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003687{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003688 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3689 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3690 "# Takes a snapshot of the main buffer.\n"
3691 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3692 "# (Doesn't have to be '2' works with any number that\n"
3693 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003694}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003695
3696static void show_snapshot_percpu_help(struct seq_file *m)
3697{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003698 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003699#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003700 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3701 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003702#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003703 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3704 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003705#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003706 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3707 "# (Doesn't have to be '2' works with any number that\n"
3708 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003709}
3710
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003711static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3712{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05003713 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003714 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003715 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003716 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003717
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003718 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003719 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3720 show_snapshot_main_help(m);
3721 else
3722 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003723}
3724#else
3725/* Should never be called */
3726static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3727#endif
3728
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003729static int s_show(struct seq_file *m, void *v)
3730{
3731 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003732 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003733
3734 if (iter->ent == NULL) {
3735 if (iter->tr) {
3736 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3737 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04003738 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003739 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003740 if (iter->snapshot && trace_empty(iter))
3741 print_snapshot_help(m, iter);
3742 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003743 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02003744 else
3745 trace_default_header(m);
3746
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003747 } else if (iter->leftover) {
3748 /*
3749 * If we filled the seq_file buffer earlier, we
3750 * want to just show it now.
3751 */
3752 ret = trace_print_seq(m, &iter->seq);
3753
3754 /* ret should this time be zero, but you never know */
3755 iter->leftover = ret;
3756
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003757 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003758 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003759 ret = trace_print_seq(m, &iter->seq);
3760 /*
3761 * If we overflow the seq_file buffer, then it will
3762 * ask us for this data again at start up.
3763 * Use that instead.
3764 * ret is 0 if seq_file write succeeded.
3765 * -1 otherwise.
3766 */
3767 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003768 }
3769
3770 return 0;
3771}
3772
Oleg Nesterov649e9c702013-07-23 17:25:54 +02003773/*
3774 * Should be used after trace_array_get(), trace_types_lock
3775 * ensures that i_cdev was already initialized.
3776 */
3777static inline int tracing_get_cpu(struct inode *inode)
3778{
3779 if (inode->i_cdev) /* See trace_create_cpu_file() */
3780 return (long)inode->i_cdev - 1;
3781 return RING_BUFFER_ALL_CPUS;
3782}
3783
James Morris88e9d342009-09-22 16:43:43 -07003784static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003785 .start = s_start,
3786 .next = s_next,
3787 .stop = s_stop,
3788 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003789};
3790
Ingo Molnare309b412008-05-12 21:20:51 +02003791static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02003792__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003793{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003794 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003795 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02003796 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003797
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003798 if (tracing_disabled)
3799 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02003800
Jiri Olsa50e18b92012-04-25 10:23:39 +02003801 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003802 if (!iter)
3803 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003804
Gil Fruchter72917232015-06-09 10:32:35 +03003805 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
Steven Rostedt6d158a82012-06-27 20:46:14 -04003806 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003807 if (!iter->buffer_iter)
3808 goto release;
3809
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003810 /*
3811 * We make a copy of the current tracer to avoid concurrent
3812 * changes on it while we are reading.
3813 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003814 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003815 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003816 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003817 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003818
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003819 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003820
Li Zefan79f55992009-06-15 14:58:26 +08003821 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003822 goto fail;
3823
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003824 iter->tr = tr;
3825
3826#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003827 /* Currently only the top directory has a snapshot */
3828 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003829 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003830 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003831#endif
3832 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003833 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003834 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02003835 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003836 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003837
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003838 /* Notify the tracer early; before we stop tracing. */
3839 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01003840 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003841
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003842 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003843 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003844 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3845
David Sharp8be07092012-11-13 12:18:22 -08003846 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09003847 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08003848 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3849
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003850 /* stop the trace while dumping if we are not opening "snapshot" */
3851 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003852 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003853
Steven Rostedtae3b5092013-01-23 15:22:59 -05003854 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003855 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003856 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003857 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003858 }
3859 ring_buffer_read_prepare_sync();
3860 for_each_tracing_cpu(cpu) {
3861 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003862 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003863 }
3864 } else {
3865 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003866 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003867 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003868 ring_buffer_read_prepare_sync();
3869 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003870 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003871 }
3872
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003873 mutex_unlock(&trace_types_lock);
3874
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003875 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003876
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003877 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003878 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003879 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003880 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003881release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02003882 seq_release_private(inode, file);
3883 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003884}
3885
3886int tracing_open_generic(struct inode *inode, struct file *filp)
3887{
Steven Rostedt60a11772008-05-12 21:20:44 +02003888 if (tracing_disabled)
3889 return -ENODEV;
3890
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003891 filp->private_data = inode->i_private;
3892 return 0;
3893}
3894
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003895bool tracing_is_disabled(void)
3896{
3897 return (tracing_disabled) ? true: false;
3898}
3899
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003900/*
3901 * Open and update trace_array ref count.
3902 * Must have the current trace_array passed to it.
3903 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003904static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003905{
3906 struct trace_array *tr = inode->i_private;
3907
3908 if (tracing_disabled)
3909 return -ENODEV;
3910
3911 if (trace_array_get(tr) < 0)
3912 return -ENODEV;
3913
3914 filp->private_data = inode->i_private;
3915
3916 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003917}
3918
Hannes Eder4fd27352009-02-10 19:44:12 +01003919static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003920{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003921 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003922 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003923 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003924 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003925
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003926 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003927 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003928 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003929 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003930
Oleg Nesterov6484c712013-07-23 17:26:10 +02003931 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003932 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003933 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003934
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003935 for_each_tracing_cpu(cpu) {
3936 if (iter->buffer_iter[cpu])
3937 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3938 }
3939
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003940 if (iter->trace && iter->trace->close)
3941 iter->trace->close(iter);
3942
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003943 if (!iter->snapshot)
3944 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003945 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003946
3947 __trace_array_put(tr);
3948
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003949 mutex_unlock(&trace_types_lock);
3950
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003951 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003952 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003953 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003954 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003955 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003956
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003957 return 0;
3958}
3959
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003960static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3961{
3962 struct trace_array *tr = inode->i_private;
3963
3964 trace_array_put(tr);
3965 return 0;
3966}
3967
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003968static int tracing_single_release_tr(struct inode *inode, struct file *file)
3969{
3970 struct trace_array *tr = inode->i_private;
3971
3972 trace_array_put(tr);
3973
3974 return single_release(inode, file);
3975}
3976
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003977static int tracing_open(struct inode *inode, struct file *file)
3978{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003979 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003980 struct trace_iterator *iter;
3981 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003982
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003983 if (trace_array_get(tr) < 0)
3984 return -ENODEV;
3985
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003986 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003987 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3988 int cpu = tracing_get_cpu(inode);
3989
3990 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003991 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003992 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003993 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003994 }
3995
3996 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003997 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003998 if (IS_ERR(iter))
3999 ret = PTR_ERR(iter);
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004000 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004001 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4002 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004003
4004 if (ret < 0)
4005 trace_array_put(tr);
4006
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004007 return ret;
4008}
4009
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004010/*
4011 * Some tracers are not suitable for instance buffers.
4012 * A tracer is always available for the global array (toplevel)
4013 * or if it explicitly states that it is.
4014 */
4015static bool
4016trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4017{
4018 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4019}
4020
4021/* Find the next tracer that this trace array may use */
4022static struct tracer *
4023get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4024{
4025 while (t && !trace_ok_for_array(t, tr))
4026 t = t->next;
4027
4028 return t;
4029}
4030
Ingo Molnare309b412008-05-12 21:20:51 +02004031static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004032t_next(struct seq_file *m, void *v, loff_t *pos)
4033{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004034 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004035 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004036
4037 (*pos)++;
4038
4039 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004040 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004041
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004042 return t;
4043}
4044
4045static void *t_start(struct seq_file *m, loff_t *pos)
4046{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004047 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004048 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004049 loff_t l = 0;
4050
4051 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004052
4053 t = get_tracer_for_array(tr, trace_types);
4054 for (; t && l < *pos; t = t_next(m, t, &l))
4055 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004056
4057 return t;
4058}
4059
4060static void t_stop(struct seq_file *m, void *p)
4061{
4062 mutex_unlock(&trace_types_lock);
4063}
4064
4065static int t_show(struct seq_file *m, void *v)
4066{
4067 struct tracer *t = v;
4068
4069 if (!t)
4070 return 0;
4071
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004072 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004073 if (t->next)
4074 seq_putc(m, ' ');
4075 else
4076 seq_putc(m, '\n');
4077
4078 return 0;
4079}
4080
James Morris88e9d342009-09-22 16:43:43 -07004081static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004082 .start = t_start,
4083 .next = t_next,
4084 .stop = t_stop,
4085 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004086};
4087
4088static int show_traces_open(struct inode *inode, struct file *file)
4089{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004090 struct trace_array *tr = inode->i_private;
4091 struct seq_file *m;
4092 int ret;
4093
Steven Rostedt60a11772008-05-12 21:20:44 +02004094 if (tracing_disabled)
4095 return -ENODEV;
4096
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004097 ret = seq_open(file, &show_traces_seq_ops);
4098 if (ret)
4099 return ret;
4100
4101 m = file->private_data;
4102 m->private = tr;
4103
4104 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004105}
4106
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004107static ssize_t
4108tracing_write_stub(struct file *filp, const char __user *ubuf,
4109 size_t count, loff_t *ppos)
4110{
4111 return count;
4112}
4113
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004114loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08004115{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004116 int ret;
4117
Slava Pestov364829b2010-11-24 15:13:16 -08004118 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004119 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08004120 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004121 file->f_pos = ret = 0;
4122
4123 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08004124}
4125
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004126static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004127 .open = tracing_open,
4128 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004129 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004130 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004131 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004132};
4133
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004134static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02004135 .open = show_traces_open,
4136 .read = seq_read,
4137 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004138 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004139};
4140
Ingo Molnar36dfe922008-05-12 21:20:52 +02004141/*
Ingo Molnar36dfe922008-05-12 21:20:52 +02004142 * The tracer itself will not take this lock, but still we want
4143 * to provide a consistent cpumask to user-space:
4144 */
4145static DEFINE_MUTEX(tracing_cpumask_update_lock);
4146
4147/*
4148 * Temporary storage for the character representation of the
4149 * CPU bitmask (and one more byte for the newline):
4150 */
4151static char mask_str[NR_CPUS + 1];
4152
Ingo Molnarc7078de2008-05-12 21:20:52 +02004153static ssize_t
4154tracing_cpumask_read(struct file *filp, char __user *ubuf,
4155 size_t count, loff_t *ppos)
4156{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004157 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004158 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004159
4160 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004161
Tejun Heo1a402432015-02-13 14:37:39 -08004162 len = snprintf(mask_str, count, "%*pb\n",
4163 cpumask_pr_args(tr->tracing_cpumask));
4164 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004165 count = -EINVAL;
4166 goto out_err;
4167 }
Ingo Molnar36dfe922008-05-12 21:20:52 +02004168 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
4169
4170out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02004171 mutex_unlock(&tracing_cpumask_update_lock);
4172
4173 return count;
4174}
4175
4176static ssize_t
4177tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4178 size_t count, loff_t *ppos)
4179{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004180 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304181 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004182 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304183
4184 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4185 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004186
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304187 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004188 if (err)
4189 goto err_unlock;
4190
Li Zefan215368e2009-06-15 10:56:42 +08004191 mutex_lock(&tracing_cpumask_update_lock);
4192
Steven Rostedta5e25882008-12-02 15:34:05 -05004193 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004194 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02004195 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004196 /*
4197 * Increase/decrease the disabled counter if we are
4198 * about to flip a bit in the cpumask:
4199 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004200 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304201 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004202 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4203 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004204 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004205 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304206 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004207 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4208 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004209 }
4210 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004211 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05004212 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02004213
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004214 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004215
Ingo Molnarc7078de2008-05-12 21:20:52 +02004216 mutex_unlock(&tracing_cpumask_update_lock);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304217 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004218
Ingo Molnarc7078de2008-05-12 21:20:52 +02004219 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004220
4221err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08004222 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004223
4224 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004225}
4226
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004227static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004228 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004229 .read = tracing_cpumask_read,
4230 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004231 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004232 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004233};
4234
Li Zefanfdb372e2009-12-08 11:15:59 +08004235static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004236{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004237 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004238 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004239 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004240 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004241
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004242 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004243 tracer_flags = tr->current_trace->flags->val;
4244 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004245
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004246 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004247 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08004248 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004249 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004250 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004251 }
4252
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004253 for (i = 0; trace_opts[i].name; i++) {
4254 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08004255 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004256 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004257 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004258 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004259 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004260
Li Zefanfdb372e2009-12-08 11:15:59 +08004261 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004262}
4263
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004264static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08004265 struct tracer_flags *tracer_flags,
4266 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004267{
Chunyu Hud39cdd22016-03-08 21:37:01 +08004268 struct tracer *trace = tracer_flags->trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004269 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004270
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004271 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004272 if (ret)
4273 return ret;
4274
4275 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08004276 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004277 else
Zhaolei77708412009-08-07 18:53:21 +08004278 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004279 return 0;
4280}
4281
Li Zefan8d18eaa2009-12-08 11:17:06 +08004282/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004283static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08004284{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004285 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004286 struct tracer_flags *tracer_flags = trace->flags;
4287 struct tracer_opt *opts = NULL;
4288 int i;
4289
4290 for (i = 0; tracer_flags->opts[i].name; i++) {
4291 opts = &tracer_flags->opts[i];
4292
4293 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004294 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08004295 }
4296
4297 return -EINVAL;
4298}
4299
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004300/* Some tracers require overwrite to stay enabled */
4301int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4302{
4303 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4304 return -1;
4305
4306 return 0;
4307}
4308
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004309int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004310{
4311 /* do nothing if flag is already set */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004312 if (!!(tr->trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004313 return 0;
4314
4315 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004316 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05004317 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004318 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004319
4320 if (enabled)
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004321 tr->trace_flags |= mask;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004322 else
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004323 tr->trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08004324
4325 if (mask == TRACE_ITER_RECORD_CMD)
4326 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08004327
Joel Fernandesd914ba32017-06-26 19:01:55 -07004328 if (mask == TRACE_ITER_RECORD_TGID) {
4329 if (!tgid_map)
4330 tgid_map = kzalloc((PID_MAX_DEFAULT + 1) * sizeof(*tgid_map),
4331 GFP_KERNEL);
4332 if (!tgid_map) {
4333 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4334 return -ENOMEM;
4335 }
4336
4337 trace_event_enable_tgid_record(enabled);
4338 }
4339
Steven Rostedtc37775d2016-04-13 16:59:18 -04004340 if (mask == TRACE_ITER_EVENT_FORK)
4341 trace_event_follow_fork(tr, enabled);
4342
Namhyung Kim1e104862017-04-17 11:44:28 +09004343 if (mask == TRACE_ITER_FUNC_FORK)
4344 ftrace_pid_follow_fork(tr, enabled);
4345
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004346 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004347 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004348#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004349 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004350#endif
4351 }
Steven Rostedt81698832012-10-11 10:15:05 -04004352
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004353 if (mask == TRACE_ITER_PRINTK) {
Steven Rostedt81698832012-10-11 10:15:05 -04004354 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004355 trace_printk_control(enabled);
4356 }
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004357
4358 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004359}
4360
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004361static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004362{
Li Zefan8d18eaa2009-12-08 11:17:06 +08004363 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004364 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004365 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004366 int i;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004367 size_t orig_len = strlen(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004368
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004369 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004370
Li Zefan8d18eaa2009-12-08 11:17:06 +08004371 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004372 neg = 1;
4373 cmp += 2;
4374 }
4375
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004376 mutex_lock(&trace_types_lock);
4377
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004378 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08004379 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004380 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004381 break;
4382 }
4383 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004384
4385 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004386 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004387 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004388
4389 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004390
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004391 /*
4392 * If the first trailing whitespace is replaced with '\0' by strstrip,
4393 * turn it back into a space.
4394 */
4395 if (orig_len > strlen(option))
4396 option[strlen(option)] = ' ';
4397
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004398 return ret;
4399}
4400
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004401static void __init apply_trace_boot_options(void)
4402{
4403 char *buf = trace_boot_options_buf;
4404 char *option;
4405
4406 while (true) {
4407 option = strsep(&buf, ",");
4408
4409 if (!option)
4410 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004411
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05004412 if (*option)
4413 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004414
4415 /* Put back the comma to allow this to be called again */
4416 if (buf)
4417 *(buf - 1) = ',';
4418 }
4419}
4420
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004421static ssize_t
4422tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4423 size_t cnt, loff_t *ppos)
4424{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004425 struct seq_file *m = filp->private_data;
4426 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004427 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004428 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004429
4430 if (cnt >= sizeof(buf))
4431 return -EINVAL;
4432
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08004433 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004434 return -EFAULT;
4435
Steven Rostedta8dd2172013-01-09 20:54:17 -05004436 buf[cnt] = 0;
4437
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004438 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004439 if (ret < 0)
4440 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004441
Jiri Olsacf8517c2009-10-23 19:36:16 -04004442 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004443
4444 return cnt;
4445}
4446
Li Zefanfdb372e2009-12-08 11:15:59 +08004447static int tracing_trace_options_open(struct inode *inode, struct file *file)
4448{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004449 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004450 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004451
Li Zefanfdb372e2009-12-08 11:15:59 +08004452 if (tracing_disabled)
4453 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004454
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004455 if (trace_array_get(tr) < 0)
4456 return -ENODEV;
4457
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004458 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4459 if (ret < 0)
4460 trace_array_put(tr);
4461
4462 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08004463}
4464
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004465static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08004466 .open = tracing_trace_options_open,
4467 .read = seq_read,
4468 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004469 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05004470 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004471};
4472
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004473static const char readme_msg[] =
4474 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004475 "# echo 0 > tracing_on : quick way to disable tracing\n"
4476 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4477 " Important files:\n"
4478 " trace\t\t\t- The static contents of the buffer\n"
4479 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4480 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4481 " current_tracer\t- function and latency tracers\n"
4482 " available_tracers\t- list of configured tracers for current_tracer\n"
4483 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4484 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4485 " trace_clock\t\t-change the clock used to order events\n"
4486 " local: Per cpu clock but may not be synced across CPUs\n"
4487 " global: Synced across CPUs but slows tracing down.\n"
4488 " counter: Not a clock, but just an increment\n"
4489 " uptime: Jiffy counter from time of boot\n"
4490 " perf: Same clock that perf events use\n"
4491#ifdef CONFIG_X86_64
4492 " x86-tsc: TSC cycle counter\n"
4493#endif
4494 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
Steven Rostedtfa32e852016-07-06 15:25:08 -04004495 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004496 " tracing_cpumask\t- Limit which CPUs to trace\n"
4497 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4498 "\t\t\t Remove sub-buffer with rmdir\n"
4499 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004500 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4501 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004502 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004503#ifdef CONFIG_DYNAMIC_FTRACE
4504 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004505 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4506 "\t\t\t functions\n"
Masami Hiramatsu60f1d5e2016-10-05 20:58:15 +09004507 "\t accepts: func_full_name or glob-matching-pattern\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004508 "\t modules: Can select a group via module\n"
4509 "\t Format: :mod:<module-name>\n"
4510 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4511 "\t triggers: a command to perform when function is hit\n"
4512 "\t Format: <function>:<trigger>[:count]\n"
4513 "\t trigger: traceon, traceoff\n"
4514 "\t\t enable_event:<system>:<event>\n"
4515 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004516#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004517 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004518#endif
4519#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004520 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004521#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04004522 "\t\t dump\n"
4523 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004524 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4525 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4526 "\t The first one will disable tracing every time do_fault is hit\n"
4527 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4528 "\t The first time do trap is hit and it disables tracing, the\n"
4529 "\t counter will decrement to 2. If tracing is already disabled,\n"
4530 "\t the counter will not decrement. It only decrements when the\n"
4531 "\t trigger did work\n"
4532 "\t To remove trigger without count:\n"
4533 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4534 "\t To remove trigger with a count:\n"
4535 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004536 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004537 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4538 "\t modules: Can select a group via module command :mod:\n"
4539 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004540#endif /* CONFIG_DYNAMIC_FTRACE */
4541#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004542 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4543 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004544#endif
4545#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4546 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09004547 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004548 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4549#endif
4550#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004551 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4552 "\t\t\t snapshot buffer. Read the contents for more\n"
4553 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004554#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004555#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004556 " stack_trace\t\t- Shows the max stack trace when active\n"
4557 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004558 "\t\t\t Write into this file to reset the max size (trigger a\n"
4559 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004560#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004561 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4562 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004563#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004564#endif /* CONFIG_STACK_TRACER */
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004565#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsu86425622016-08-18 17:58:15 +09004566 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4567 "\t\t\t Write into this file to define/undefine new trace events.\n"
4568#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004569#ifdef CONFIG_UPROBE_EVENTS
Masami Hiramatsu86425622016-08-18 17:58:15 +09004570 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4571 "\t\t\t Write into this file to define/undefine new trace events.\n"
4572#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004573#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
Masami Hiramatsu86425622016-08-18 17:58:15 +09004574 "\t accepts: event-definitions (one definition per line)\n"
4575 "\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
4576 "\t -:[<group>/]<event>\n"
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004577#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsu86425622016-08-18 17:58:15 +09004578 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
Naveen N. Rao35b6f552017-02-22 19:23:39 +05304579 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09004580#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004581#ifdef CONFIG_UPROBE_EVENTS
Masami Hiramatsu86425622016-08-18 17:58:15 +09004582 "\t place: <path>:<offset>\n"
4583#endif
4584 "\t args: <name>=fetcharg[:type]\n"
4585 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4586 "\t $stack<index>, $stack, $retval, $comm\n"
4587 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4588 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4589#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06004590 " events/\t\t- Directory containing all trace event subsystems:\n"
4591 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4592 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004593 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4594 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004595 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004596 " events/<system>/<event>/\t- Directory containing control files for\n"
4597 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004598 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4599 " filter\t\t- If set, only events passing filter are traced\n"
4600 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004601 "\t Format: <trigger>[:count][if <filter>]\n"
4602 "\t trigger: traceon, traceoff\n"
4603 "\t enable_event:<system>:<event>\n"
4604 "\t disable_event:<system>:<event>\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06004605#ifdef CONFIG_HIST_TRIGGERS
4606 "\t enable_hist:<system>:<event>\n"
4607 "\t disable_hist:<system>:<event>\n"
4608#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06004609#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004610 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004611#endif
4612#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004613 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004614#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004615#ifdef CONFIG_HIST_TRIGGERS
4616 "\t\t hist (see below)\n"
4617#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004618 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4619 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4620 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4621 "\t events/block/block_unplug/trigger\n"
4622 "\t The first disables tracing every time block_unplug is hit.\n"
4623 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4624 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4625 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4626 "\t Like function triggers, the counter is only decremented if it\n"
4627 "\t enabled or disabled tracing.\n"
4628 "\t To remove a trigger without a count:\n"
4629 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4630 "\t To remove a trigger with a count:\n"
4631 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4632 "\t Filters can be ignored when removing a trigger.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004633#ifdef CONFIG_HIST_TRIGGERS
4634 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
Tom Zanussi76a3b0c2016-03-03 12:54:44 -06004635 "\t Format: hist:keys=<field1[,field2,...]>\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004636 "\t [:values=<field1[,field2,...]>]\n"
Tom Zanussie62347d2016-03-03 12:54:45 -06004637 "\t [:sort=<field1[,field2,...]>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004638 "\t [:size=#entries]\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06004639 "\t [:pause][:continue][:clear]\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004640 "\t [:name=histname1]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004641 "\t [if <filter>]\n\n"
4642 "\t When a matching event is hit, an entry is added to a hash\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004643 "\t table using the key(s) and value(s) named, and the value of a\n"
4644 "\t sum called 'hitcount' is incremented. Keys and values\n"
4645 "\t correspond to fields in the event's format description. Keys\n"
Tom Zanussi69a02002016-03-03 12:54:52 -06004646 "\t can be any field, or the special string 'stacktrace'.\n"
4647 "\t Compound keys consisting of up to two fields can be specified\n"
4648 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4649 "\t fields. Sort keys consisting of up to two fields can be\n"
4650 "\t specified using the 'sort' keyword. The sort direction can\n"
4651 "\t be modified by appending '.descending' or '.ascending' to a\n"
4652 "\t sort field. The 'size' parameter can be used to specify more\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004653 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4654 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4655 "\t its histogram data will be shared with other triggers of the\n"
4656 "\t same name, and trigger hits will update this common data.\n\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004657 "\t Reading the 'hist' file for the event will dump the hash\n"
Tom Zanussi52a7f162016-03-03 12:54:57 -06004658 "\t table in its entirety to stdout. If there are multiple hist\n"
4659 "\t triggers attached to an event, there will be a table for each\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004660 "\t trigger in the output. The table displayed for a named\n"
4661 "\t trigger will be the same as any other instance having the\n"
4662 "\t same name. The default format used to display a given field\n"
4663 "\t can be modified by appending any of the following modifiers\n"
4664 "\t to the field name, as applicable:\n\n"
Tom Zanussic6afad42016-03-03 12:54:49 -06004665 "\t .hex display a number as a hex value\n"
4666 "\t .sym display an address as a symbol\n"
Tom Zanussi6b4827a2016-03-03 12:54:50 -06004667 "\t .sym-offset display an address as a symbol and offset\n"
Tom Zanussi31696192016-03-03 12:54:51 -06004668 "\t .execname display a common_pid as a program name\n"
4669 "\t .syscall display a syscall id as a syscall name\n\n"
Namhyung Kim4b94f5b2016-03-03 12:55:02 -06004670 "\t .log2 display log2 value rather than raw number\n\n"
Tom Zanussi83e99912016-03-03 12:54:46 -06004671 "\t The 'pause' parameter can be used to pause an existing hist\n"
4672 "\t trigger or to start a hist trigger but not log any events\n"
4673 "\t until told to do so. 'continue' can be used to start or\n"
4674 "\t restart a paused hist trigger.\n\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06004675 "\t The 'clear' parameter will clear the contents of a running\n"
4676 "\t hist trigger and leave its current paused/active state\n"
4677 "\t unchanged.\n\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06004678 "\t The enable_hist and disable_hist triggers can be used to\n"
4679 "\t have one event conditionally start and stop another event's\n"
4680 "\t already-attached hist trigger. The syntax is analagous to\n"
4681 "\t the enable_event and disable_event triggers.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004682#endif
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004683;
4684
4685static ssize_t
4686tracing_readme_read(struct file *filp, char __user *ubuf,
4687 size_t cnt, loff_t *ppos)
4688{
4689 return simple_read_from_buffer(ubuf, cnt, ppos,
4690 readme_msg, strlen(readme_msg));
4691}
4692
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004693static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02004694 .open = tracing_open_generic,
4695 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004696 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004697};
4698
Michael Sartain99c621d2017-07-05 22:07:15 -06004699static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4700{
4701 int *ptr = v;
4702
4703 if (*pos || m->count)
4704 ptr++;
4705
4706 (*pos)++;
4707
4708 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4709 if (trace_find_tgid(*ptr))
4710 return ptr;
4711 }
4712
4713 return NULL;
4714}
4715
4716static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
4717{
4718 void *v;
4719 loff_t l = 0;
4720
4721 if (!tgid_map)
4722 return NULL;
4723
4724 v = &tgid_map[0];
4725 while (l <= *pos) {
4726 v = saved_tgids_next(m, v, &l);
4727 if (!v)
4728 return NULL;
4729 }
4730
4731 return v;
4732}
4733
4734static void saved_tgids_stop(struct seq_file *m, void *v)
4735{
4736}
4737
4738static int saved_tgids_show(struct seq_file *m, void *v)
4739{
4740 int pid = (int *)v - tgid_map;
4741
4742 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
4743 return 0;
4744}
4745
4746static const struct seq_operations tracing_saved_tgids_seq_ops = {
4747 .start = saved_tgids_start,
4748 .stop = saved_tgids_stop,
4749 .next = saved_tgids_next,
4750 .show = saved_tgids_show,
4751};
4752
4753static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
4754{
4755 if (tracing_disabled)
4756 return -ENODEV;
4757
4758 return seq_open(filp, &tracing_saved_tgids_seq_ops);
4759}
4760
4761
4762static const struct file_operations tracing_saved_tgids_fops = {
4763 .open = tracing_saved_tgids_open,
4764 .read = seq_read,
4765 .llseek = seq_lseek,
4766 .release = seq_release,
4767};
4768
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004769static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04004770{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004771 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004772
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004773 if (*pos || m->count)
4774 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004775
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004776 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004777
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004778 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4779 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004780 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04004781 continue;
4782
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004783 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004784 }
4785
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004786 return NULL;
4787}
Avadh Patel69abe6a2009-04-10 16:04:48 -04004788
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004789static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4790{
4791 void *v;
4792 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004793
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004794 preempt_disable();
4795 arch_spin_lock(&trace_cmdline_lock);
4796
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004797 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004798 while (l <= *pos) {
4799 v = saved_cmdlines_next(m, v, &l);
4800 if (!v)
4801 return NULL;
4802 }
4803
4804 return v;
4805}
4806
4807static void saved_cmdlines_stop(struct seq_file *m, void *v)
4808{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004809 arch_spin_unlock(&trace_cmdline_lock);
4810 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004811}
4812
4813static int saved_cmdlines_show(struct seq_file *m, void *v)
4814{
4815 char buf[TASK_COMM_LEN];
4816 unsigned int *pid = v;
4817
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004818 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004819 seq_printf(m, "%d %s\n", *pid, buf);
4820 return 0;
4821}
4822
4823static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4824 .start = saved_cmdlines_start,
4825 .next = saved_cmdlines_next,
4826 .stop = saved_cmdlines_stop,
4827 .show = saved_cmdlines_show,
4828};
4829
4830static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4831{
4832 if (tracing_disabled)
4833 return -ENODEV;
4834
4835 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04004836}
4837
4838static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004839 .open = tracing_saved_cmdlines_open,
4840 .read = seq_read,
4841 .llseek = seq_lseek,
4842 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04004843};
4844
4845static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004846tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4847 size_t cnt, loff_t *ppos)
4848{
4849 char buf[64];
4850 int r;
4851
4852 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09004853 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004854 arch_spin_unlock(&trace_cmdline_lock);
4855
4856 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4857}
4858
4859static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4860{
4861 kfree(s->saved_cmdlines);
4862 kfree(s->map_cmdline_to_pid);
4863 kfree(s);
4864}
4865
4866static int tracing_resize_saved_cmdlines(unsigned int val)
4867{
4868 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4869
Namhyung Kima6af8fb2014-06-10 16:11:35 +09004870 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004871 if (!s)
4872 return -ENOMEM;
4873
4874 if (allocate_cmdlines_buffer(val, s) < 0) {
4875 kfree(s);
4876 return -ENOMEM;
4877 }
4878
4879 arch_spin_lock(&trace_cmdline_lock);
4880 savedcmd_temp = savedcmd;
4881 savedcmd = s;
4882 arch_spin_unlock(&trace_cmdline_lock);
4883 free_saved_cmdlines_buffer(savedcmd_temp);
4884
4885 return 0;
4886}
4887
4888static ssize_t
4889tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4890 size_t cnt, loff_t *ppos)
4891{
4892 unsigned long val;
4893 int ret;
4894
4895 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4896 if (ret)
4897 return ret;
4898
4899 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
4900 if (!val || val > PID_MAX_DEFAULT)
4901 return -EINVAL;
4902
4903 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4904 if (ret < 0)
4905 return ret;
4906
4907 *ppos += cnt;
4908
4909 return cnt;
4910}
4911
4912static const struct file_operations tracing_saved_cmdlines_size_fops = {
4913 .open = tracing_open_generic,
4914 .read = tracing_saved_cmdlines_size_read,
4915 .write = tracing_saved_cmdlines_size_write,
4916};
4917
Jeremy Linton681bec02017-05-31 16:56:53 -05004918#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05004919static union trace_eval_map_item *
Jeremy Lintonf57a4142017-05-31 16:56:48 -05004920update_eval_map(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004921{
Jeremy Linton00f4b652017-05-31 16:56:43 -05004922 if (!ptr->map.eval_string) {
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004923 if (ptr->tail.next) {
4924 ptr = ptr->tail.next;
4925 /* Set ptr to the next real item (skip head) */
4926 ptr++;
4927 } else
4928 return NULL;
4929 }
4930 return ptr;
4931}
4932
Jeremy Lintonf57a4142017-05-31 16:56:48 -05004933static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004934{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05004935 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004936
4937 /*
4938 * Paranoid! If ptr points to end, we don't want to increment past it.
4939 * This really should never happen.
4940 */
Jeremy Lintonf57a4142017-05-31 16:56:48 -05004941 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004942 if (WARN_ON_ONCE(!ptr))
4943 return NULL;
4944
4945 ptr++;
4946
4947 (*pos)++;
4948
Jeremy Lintonf57a4142017-05-31 16:56:48 -05004949 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004950
4951 return ptr;
4952}
4953
Jeremy Lintonf57a4142017-05-31 16:56:48 -05004954static void *eval_map_start(struct seq_file *m, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004955{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05004956 union trace_eval_map_item *v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004957 loff_t l = 0;
4958
Jeremy Linton1793ed92017-05-31 16:56:46 -05004959 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004960
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05004961 v = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004962 if (v)
4963 v++;
4964
4965 while (v && l < *pos) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05004966 v = eval_map_next(m, v, &l);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004967 }
4968
4969 return v;
4970}
4971
Jeremy Lintonf57a4142017-05-31 16:56:48 -05004972static void eval_map_stop(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004973{
Jeremy Linton1793ed92017-05-31 16:56:46 -05004974 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004975}
4976
Jeremy Lintonf57a4142017-05-31 16:56:48 -05004977static int eval_map_show(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004978{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05004979 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004980
4981 seq_printf(m, "%s %ld (%s)\n",
Jeremy Linton00f4b652017-05-31 16:56:43 -05004982 ptr->map.eval_string, ptr->map.eval_value,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004983 ptr->map.system);
4984
4985 return 0;
4986}
4987
Jeremy Lintonf57a4142017-05-31 16:56:48 -05004988static const struct seq_operations tracing_eval_map_seq_ops = {
4989 .start = eval_map_start,
4990 .next = eval_map_next,
4991 .stop = eval_map_stop,
4992 .show = eval_map_show,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004993};
4994
Jeremy Lintonf57a4142017-05-31 16:56:48 -05004995static int tracing_eval_map_open(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004996{
4997 if (tracing_disabled)
4998 return -ENODEV;
4999
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005000 return seq_open(filp, &tracing_eval_map_seq_ops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005001}
5002
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005003static const struct file_operations tracing_eval_map_fops = {
5004 .open = tracing_eval_map_open,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005005 .read = seq_read,
5006 .llseek = seq_lseek,
5007 .release = seq_release,
5008};
5009
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005010static inline union trace_eval_map_item *
Jeremy Linton5f60b352017-05-31 16:56:47 -05005011trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005012{
5013 /* Return tail of array given the head */
5014 return ptr + ptr->head.length + 1;
5015}
5016
5017static void
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005018trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005019 int len)
5020{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005021 struct trace_eval_map **stop;
5022 struct trace_eval_map **map;
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005023 union trace_eval_map_item *map_array;
5024 union trace_eval_map_item *ptr;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005025
5026 stop = start + len;
5027
5028 /*
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005029 * The trace_eval_maps contains the map plus a head and tail item,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005030 * where the head holds the module and length of array, and the
5031 * tail holds a pointer to the next list.
5032 */
5033 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
5034 if (!map_array) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005035 pr_warn("Unable to allocate trace eval mapping\n");
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005036 return;
5037 }
5038
Jeremy Linton1793ed92017-05-31 16:56:46 -05005039 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005040
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005041 if (!trace_eval_maps)
5042 trace_eval_maps = map_array;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005043 else {
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005044 ptr = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005045 for (;;) {
Jeremy Linton5f60b352017-05-31 16:56:47 -05005046 ptr = trace_eval_jmp_to_tail(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005047 if (!ptr->tail.next)
5048 break;
5049 ptr = ptr->tail.next;
5050
5051 }
5052 ptr->tail.next = map_array;
5053 }
5054 map_array->head.mod = mod;
5055 map_array->head.length = len;
5056 map_array++;
5057
5058 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5059 map_array->map = **map;
5060 map_array++;
5061 }
5062 memset(map_array, 0, sizeof(*map_array));
5063
Jeremy Linton1793ed92017-05-31 16:56:46 -05005064 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005065}
5066
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005067static void trace_create_eval_file(struct dentry *d_tracer)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005068{
Jeremy Linton681bec02017-05-31 16:56:53 -05005069 trace_create_file("eval_map", 0444, d_tracer,
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005070 NULL, &tracing_eval_map_fops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005071}
5072
Jeremy Linton681bec02017-05-31 16:56:53 -05005073#else /* CONFIG_TRACE_EVAL_MAP_FILE */
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005074static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5075static inline void trace_insert_eval_map_file(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05005076 struct trace_eval_map **start, int len) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05005077#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005078
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005079static void trace_insert_eval_map(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05005080 struct trace_eval_map **start, int len)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005081{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005082 struct trace_eval_map **map;
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005083
5084 if (len <= 0)
5085 return;
5086
5087 map = start;
5088
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005089 trace_event_eval_update(map, len);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005090
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005091 trace_insert_eval_map_file(mod, start, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005092}
5093
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005094static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005095tracing_set_trace_read(struct file *filp, char __user *ubuf,
5096 size_t cnt, loff_t *ppos)
5097{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005098 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08005099 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005100 int r;
5101
5102 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005103 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005104 mutex_unlock(&trace_types_lock);
5105
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005106 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005107}
5108
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005109int tracer_init(struct tracer *t, struct trace_array *tr)
5110{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005111 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005112 return t->init(tr);
5113}
5114
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005115static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005116{
5117 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005118
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005119 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005120 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005121}
5122
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005123#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09005124/* resize @tr's buffer to the size of @size_tr's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005125static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5126 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09005127{
5128 int cpu, ret = 0;
5129
5130 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5131 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005132 ret = ring_buffer_resize(trace_buf->buffer,
5133 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005134 if (ret < 0)
5135 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005136 per_cpu_ptr(trace_buf->data, cpu)->entries =
5137 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09005138 }
5139 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005140 ret = ring_buffer_resize(trace_buf->buffer,
5141 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005142 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005143 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5144 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09005145 }
5146
5147 return ret;
5148}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005149#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09005150
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005151static int __tracing_resize_ring_buffer(struct trace_array *tr,
5152 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04005153{
5154 int ret;
5155
5156 /*
5157 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04005158 * we use the size that was given, and we can forget about
5159 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04005160 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05005161 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04005162
Steven Rostedtb382ede62012-10-10 21:44:34 -04005163 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005164 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04005165 return 0;
5166
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005167 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005168 if (ret < 0)
5169 return ret;
5170
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005171#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005172 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5173 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005174 goto out;
5175
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005176 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005177 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005178 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5179 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005180 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04005181 /*
5182 * AARGH! We are left with different
5183 * size max buffer!!!!
5184 * The max buffer is our "snapshot" buffer.
5185 * When a tracer needs a snapshot (one of the
5186 * latency tracers), it swaps the max buffer
5187 * with the saved snap shot. We succeeded to
5188 * update the size of the main buffer, but failed to
5189 * update the size of the max buffer. But when we tried
5190 * to reset the main buffer to the original size, we
5191 * failed there too. This is very unlikely to
5192 * happen, but if it does, warn and kill all
5193 * tracing.
5194 */
Steven Rostedt73c51622009-03-11 13:42:01 -04005195 WARN_ON(1);
5196 tracing_disabled = 1;
5197 }
5198 return ret;
5199 }
5200
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005201 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005202 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005203 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005204 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005205
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005206 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005207#endif /* CONFIG_TRACER_MAX_TRACE */
5208
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005209 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005210 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005211 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005212 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04005213
5214 return ret;
5215}
5216
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005217static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5218 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005219{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07005220 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005221
5222 mutex_lock(&trace_types_lock);
5223
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005224 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5225 /* make sure, this cpu is enabled in the mask */
5226 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5227 ret = -EINVAL;
5228 goto out;
5229 }
5230 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005231
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005232 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005233 if (ret < 0)
5234 ret = -ENOMEM;
5235
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005236out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005237 mutex_unlock(&trace_types_lock);
5238
5239 return ret;
5240}
5241
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005242
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005243/**
5244 * tracing_update_buffers - used by tracing facility to expand ring buffers
5245 *
5246 * To save on memory when the tracing is never used on a system with it
5247 * configured in. The ring buffers are set to a minimum size. But once
5248 * a user starts to use the tracing facility, then they need to grow
5249 * to their default size.
5250 *
5251 * This function is to be called when a tracer is about to be used.
5252 */
5253int tracing_update_buffers(void)
5254{
5255 int ret = 0;
5256
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005257 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005258 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005259 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005260 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005261 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005262
5263 return ret;
5264}
5265
Steven Rostedt577b7852009-02-26 23:43:05 -05005266struct trace_option_dentry;
5267
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04005268static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005269create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05005270
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05005271/*
5272 * Used to clear out the tracer before deletion of an instance.
5273 * Must have trace_types_lock held.
5274 */
5275static void tracing_set_nop(struct trace_array *tr)
5276{
5277 if (tr->current_trace == &nop_trace)
5278 return;
5279
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005280 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05005281
5282 if (tr->current_trace->reset)
5283 tr->current_trace->reset(tr);
5284
5285 tr->current_trace = &nop_trace;
5286}
5287
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04005288static void add_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005289{
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005290 /* Only enable if the directory has been created already. */
5291 if (!tr->dir)
5292 return;
5293
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04005294 create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005295}
5296
5297static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5298{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005299 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005300#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005301 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005302#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005303 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005304
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005305 mutex_lock(&trace_types_lock);
5306
Steven Rostedt73c51622009-03-11 13:42:01 -04005307 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005308 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005309 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04005310 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01005311 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04005312 ret = 0;
5313 }
5314
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005315 for (t = trace_types; t; t = t->next) {
5316 if (strcmp(t->name, buf) == 0)
5317 break;
5318 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005319 if (!t) {
5320 ret = -EINVAL;
5321 goto out;
5322 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005323 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005324 goto out;
5325
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005326 /* Some tracers are only allowed for the top level buffer */
5327 if (!trace_ok_for_array(t, tr)) {
5328 ret = -EINVAL;
5329 goto out;
5330 }
5331
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005332 /* If trace pipe files are being read, we can't change the tracer */
5333 if (tr->current_trace->ref) {
5334 ret = -EBUSY;
5335 goto out;
5336 }
5337
Steven Rostedt9f029e82008-11-12 15:24:24 -05005338 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005339
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005340 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005341
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005342 if (tr->current_trace->reset)
5343 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05005344
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005345 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005346 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05005347
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005348#ifdef CONFIG_TRACER_MAX_TRACE
5349 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05005350
5351 if (had_max_tr && !t->use_max_tr) {
5352 /*
5353 * We need to make sure that the update_max_tr sees that
5354 * current_trace changed to nop_trace to keep it from
5355 * swapping the buffers after we resize it.
5356 * The update_max_tr is called from interrupts disabled
5357 * so a synchronized_sched() is sufficient.
5358 */
5359 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005360 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005361 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005362#endif
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005363
5364#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005365 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005366 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005367 if (ret < 0)
5368 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005369 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005370#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05005371
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005372 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005373 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005374 if (ret)
5375 goto out;
5376 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005377
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005378 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005379 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05005380 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005381 out:
5382 mutex_unlock(&trace_types_lock);
5383
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005384 return ret;
5385}
5386
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005387static ssize_t
5388tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5389 size_t cnt, loff_t *ppos)
5390{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005391 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08005392 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005393 int i;
5394 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005395 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005396
Steven Rostedt60063a62008-10-28 10:44:24 -04005397 ret = cnt;
5398
Li Zefanee6c2c12009-09-18 14:06:47 +08005399 if (cnt > MAX_TRACER_SIZE)
5400 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005401
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08005402 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005403 return -EFAULT;
5404
5405 buf[cnt] = 0;
5406
5407 /* strip ending whitespace. */
5408 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5409 buf[i] = 0;
5410
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005411 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005412 if (err)
5413 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005414
Jiri Olsacf8517c2009-10-23 19:36:16 -04005415 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005416
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005417 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005418}
5419
5420static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005421tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5422 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005423{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005424 char buf[64];
5425 int r;
5426
Steven Rostedtcffae432008-05-12 21:21:00 +02005427 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005428 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02005429 if (r > sizeof(buf))
5430 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005431 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005432}
5433
5434static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005435tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5436 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005437{
Hannes Eder5e398412009-02-10 19:44:34 +01005438 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005439 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005440
Peter Huewe22fe9b52011-06-07 21:58:27 +02005441 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5442 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005443 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005444
5445 *ptr = val * 1000;
5446
5447 return cnt;
5448}
5449
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005450static ssize_t
5451tracing_thresh_read(struct file *filp, char __user *ubuf,
5452 size_t cnt, loff_t *ppos)
5453{
5454 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5455}
5456
5457static ssize_t
5458tracing_thresh_write(struct file *filp, const char __user *ubuf,
5459 size_t cnt, loff_t *ppos)
5460{
5461 struct trace_array *tr = filp->private_data;
5462 int ret;
5463
5464 mutex_lock(&trace_types_lock);
5465 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5466 if (ret < 0)
5467 goto out;
5468
5469 if (tr->current_trace->update_thresh) {
5470 ret = tr->current_trace->update_thresh(tr);
5471 if (ret < 0)
5472 goto out;
5473 }
5474
5475 ret = cnt;
5476out:
5477 mutex_unlock(&trace_types_lock);
5478
5479 return ret;
5480}
5481
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04005482#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Chen Gange428abb2015-11-10 05:15:15 +08005483
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005484static ssize_t
5485tracing_max_lat_read(struct file *filp, char __user *ubuf,
5486 size_t cnt, loff_t *ppos)
5487{
5488 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5489}
5490
5491static ssize_t
5492tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5493 size_t cnt, loff_t *ppos)
5494{
5495 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5496}
5497
Chen Gange428abb2015-11-10 05:15:15 +08005498#endif
5499
Steven Rostedtb3806b42008-05-12 21:20:46 +02005500static int tracing_open_pipe(struct inode *inode, struct file *filp)
5501{
Oleg Nesterov15544202013-07-23 17:25:57 +02005502 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005503 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005504 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005505
5506 if (tracing_disabled)
5507 return -ENODEV;
5508
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005509 if (trace_array_get(tr) < 0)
5510 return -ENODEV;
5511
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005512 mutex_lock(&trace_types_lock);
5513
Steven Rostedtb3806b42008-05-12 21:20:46 +02005514 /* create a buffer to store the information to pass to userspace */
5515 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005516 if (!iter) {
5517 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005518 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005519 goto out;
5520 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005521
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04005522 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005523 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005524
5525 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5526 ret = -ENOMEM;
5527 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10305528 }
5529
Steven Rostedta3097202008-11-07 22:36:02 -05005530 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10305531 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05005532
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005533 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04005534 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5535
David Sharp8be07092012-11-13 12:18:22 -08005536 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005537 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08005538 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5539
Oleg Nesterov15544202013-07-23 17:25:57 +02005540 iter->tr = tr;
5541 iter->trace_buffer = &tr->trace_buffer;
5542 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005543 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005544 filp->private_data = iter;
5545
Steven Rostedt107bad82008-05-12 21:21:01 +02005546 if (iter->trace->pipe_open)
5547 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02005548
Arnd Bergmannb4447862010-07-07 23:40:11 +02005549 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005550
5551 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005552out:
5553 mutex_unlock(&trace_types_lock);
5554 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005555
5556fail:
5557 kfree(iter->trace);
5558 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005559 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005560 mutex_unlock(&trace_types_lock);
5561 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005562}
5563
5564static int tracing_release_pipe(struct inode *inode, struct file *file)
5565{
5566 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02005567 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005568
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005569 mutex_lock(&trace_types_lock);
5570
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005571 tr->current_trace->ref--;
5572
Steven Rostedt29bf4a52009-12-09 12:37:43 -05005573 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05005574 iter->trace->pipe_close(iter);
5575
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005576 mutex_unlock(&trace_types_lock);
5577
Rusty Russell44623442009-01-01 10:12:23 +10305578 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005579 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005580 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005581
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005582 trace_array_put(tr);
5583
Steven Rostedtb3806b42008-05-12 21:20:46 +02005584 return 0;
5585}
5586
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005587static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005588trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005589{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005590 struct trace_array *tr = iter->tr;
5591
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005592 /* Iterators are static, they should be filled or empty */
5593 if (trace_buffer_iter(iter, iter->cpu_file))
5594 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005595
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005596 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005597 /*
5598 * Always select as readable when in blocking mode
5599 */
5600 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005601 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005602 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005603 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005604}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005605
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005606static unsigned int
5607tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5608{
5609 struct trace_iterator *iter = filp->private_data;
5610
5611 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005612}
5613
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005614/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005615static int tracing_wait_pipe(struct file *filp)
5616{
5617 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005618 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005619
5620 while (trace_empty(iter)) {
5621
5622 if ((filp->f_flags & O_NONBLOCK)) {
5623 return -EAGAIN;
5624 }
5625
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005626 /*
Liu Bo250bfd32013-01-14 10:54:11 +08005627 * We block until we read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005628 * We still block if tracing is disabled, but we have never
5629 * read anything. This allows a user to cat this file, and
5630 * then enable tracing. But after we have read something,
5631 * we give an EOF when tracing is again disabled.
5632 *
5633 * iter->pos will be 0 if we haven't read anything.
5634 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04005635 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005636 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04005637
5638 mutex_unlock(&iter->mutex);
5639
Rabin Vincente30f53a2014-11-10 19:46:34 +01005640 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04005641
5642 mutex_lock(&iter->mutex);
5643
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005644 if (ret)
5645 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005646 }
5647
5648 return 1;
5649}
5650
Steven Rostedtb3806b42008-05-12 21:20:46 +02005651/*
5652 * Consumer reader.
5653 */
5654static ssize_t
5655tracing_read_pipe(struct file *filp, char __user *ubuf,
5656 size_t cnt, loff_t *ppos)
5657{
5658 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005659 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005660
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005661 /*
5662 * Avoid more than one consumer on a single file descriptor
5663 * This is just a matter of traces coherency, the ring buffer itself
5664 * is protected.
5665 */
5666 mutex_lock(&iter->mutex);
Steven Rostedt (Red Hat)12458002016-09-23 22:57:13 -04005667
5668 /* return any leftover data */
5669 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5670 if (sret != -EBUSY)
5671 goto out;
5672
5673 trace_seq_init(&iter->seq);
5674
Steven Rostedt107bad82008-05-12 21:21:01 +02005675 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005676 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5677 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02005678 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02005679 }
5680
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005681waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005682 sret = tracing_wait_pipe(filp);
5683 if (sret <= 0)
5684 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005685
5686 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005687 if (trace_empty(iter)) {
5688 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02005689 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005690 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005691
5692 if (cnt >= PAGE_SIZE)
5693 cnt = PAGE_SIZE - 1;
5694
Steven Rostedt53d0aa72008-05-12 21:21:01 +02005695 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02005696 memset(&iter->seq, 0,
5697 sizeof(struct trace_iterator) -
5698 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04005699 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02005700 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005701
Lai Jiangshan4f535962009-05-18 19:35:34 +08005702 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005703 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05005704 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02005705 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005706 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02005707
Ingo Molnarf9896bf2008-05-12 21:20:47 +02005708 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02005709 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02005710 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005711 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005712 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02005713 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01005714 if (ret != TRACE_TYPE_NO_CONSUME)
5715 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005716
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005717 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02005718 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01005719
5720 /*
5721 * Setting the full flag means we reached the trace_seq buffer
5722 * size and we should leave by partial output condition above.
5723 * One of the trace_seq_* functions is not used properly.
5724 */
5725 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5726 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005727 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005728 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005729 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02005730
Steven Rostedtb3806b42008-05-12 21:20:46 +02005731 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005732 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005733 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05005734 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005735
5736 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005737 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005738 * entries, go back to wait for more entries.
5739 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005740 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005741 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005742
Steven Rostedt107bad82008-05-12 21:21:01 +02005743out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005744 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02005745
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005746 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005747}
5748
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005749static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5750 unsigned int idx)
5751{
5752 __free_page(spd->pages[idx]);
5753}
5754
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005755static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05005756 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005757 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05005758 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005759 .steal = generic_pipe_buf_steal,
5760 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005761};
5762
Steven Rostedt34cd4992009-02-09 12:06:29 -05005763static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01005764tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005765{
5766 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005767 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005768 int ret;
5769
5770 /* Seq buffer is page-sized, exactly what we need. */
5771 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005772 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005773 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005774
5775 if (trace_seq_has_overflowed(&iter->seq)) {
5776 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005777 break;
5778 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005779
5780 /*
5781 * This should not be hit, because it should only
5782 * be set if the iter->seq overflowed. But check it
5783 * anyway to be safe.
5784 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05005785 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005786 iter->seq.seq.len = save_len;
5787 break;
5788 }
5789
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005790 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005791 if (rem < count) {
5792 rem = 0;
5793 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005794 break;
5795 }
5796
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08005797 if (ret != TRACE_TYPE_NO_CONSUME)
5798 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05005799 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05005800 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05005801 rem = 0;
5802 iter->ent = NULL;
5803 break;
5804 }
5805 }
5806
5807 return rem;
5808}
5809
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005810static ssize_t tracing_splice_read_pipe(struct file *filp,
5811 loff_t *ppos,
5812 struct pipe_inode_info *pipe,
5813 size_t len,
5814 unsigned int flags)
5815{
Jens Axboe35f3d142010-05-20 10:43:18 +02005816 struct page *pages_def[PIPE_DEF_BUFFERS];
5817 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005818 struct trace_iterator *iter = filp->private_data;
5819 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005820 .pages = pages_def,
5821 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005822 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02005823 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005824 .ops = &tracing_pipe_buf_ops,
5825 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005826 };
5827 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005828 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005829 unsigned int i;
5830
Jens Axboe35f3d142010-05-20 10:43:18 +02005831 if (splice_grow_spd(pipe, &spd))
5832 return -ENOMEM;
5833
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005834 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005835
5836 if (iter->trace->splice_read) {
5837 ret = iter->trace->splice_read(iter, filp,
5838 ppos, pipe, len, flags);
5839 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005840 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005841 }
5842
5843 ret = tracing_wait_pipe(filp);
5844 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005845 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005846
Jason Wessel955b61e2010-08-05 09:22:23 -05005847 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005848 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005849 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005850 }
5851
Lai Jiangshan4f535962009-05-18 19:35:34 +08005852 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005853 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005854
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005855 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04005856 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005857 spd.pages[i] = alloc_page(GFP_KERNEL);
5858 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05005859 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005860
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01005861 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005862
5863 /* Copy the data into the page, so we can start over. */
5864 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02005865 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005866 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005867 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005868 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005869 break;
5870 }
Jens Axboe35f3d142010-05-20 10:43:18 +02005871 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005872 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005873
Steven Rostedtf9520752009-03-02 14:04:40 -05005874 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005875 }
5876
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005877 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005878 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005879 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005880
5881 spd.nr_pages = i;
5882
Steven Rostedt (Red Hat)a29054d92016-03-18 15:46:48 -04005883 if (i)
5884 ret = splice_to_pipe(pipe, &spd);
5885 else
5886 ret = 0;
Jens Axboe35f3d142010-05-20 10:43:18 +02005887out:
Eric Dumazet047fe362012-06-12 15:24:40 +02005888 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02005889 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005890
Steven Rostedt34cd4992009-02-09 12:06:29 -05005891out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005892 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02005893 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005894}
5895
Steven Rostedta98a3c32008-05-12 21:20:59 +02005896static ssize_t
5897tracing_entries_read(struct file *filp, char __user *ubuf,
5898 size_t cnt, loff_t *ppos)
5899{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005900 struct inode *inode = file_inode(filp);
5901 struct trace_array *tr = inode->i_private;
5902 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005903 char buf[64];
5904 int r = 0;
5905 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005906
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005907 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005908
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005909 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005910 int cpu, buf_size_same;
5911 unsigned long size;
5912
5913 size = 0;
5914 buf_size_same = 1;
5915 /* check if all cpu sizes are same */
5916 for_each_tracing_cpu(cpu) {
5917 /* fill in the size from first enabled cpu */
5918 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005919 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5920 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005921 buf_size_same = 0;
5922 break;
5923 }
5924 }
5925
5926 if (buf_size_same) {
5927 if (!ring_buffer_expanded)
5928 r = sprintf(buf, "%lu (expanded: %lu)\n",
5929 size >> 10,
5930 trace_buf_size >> 10);
5931 else
5932 r = sprintf(buf, "%lu\n", size >> 10);
5933 } else
5934 r = sprintf(buf, "X\n");
5935 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005936 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005937
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005938 mutex_unlock(&trace_types_lock);
5939
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005940 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5941 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005942}
5943
5944static ssize_t
5945tracing_entries_write(struct file *filp, const char __user *ubuf,
5946 size_t cnt, loff_t *ppos)
5947{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005948 struct inode *inode = file_inode(filp);
5949 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005950 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005951 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005952
Peter Huewe22fe9b52011-06-07 21:58:27 +02005953 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5954 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005955 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005956
5957 /* must have at least 1 entry */
5958 if (!val)
5959 return -EINVAL;
5960
Steven Rostedt1696b2b2008-11-13 00:09:35 -05005961 /* value is in KB */
5962 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005963 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005964 if (ret < 0)
5965 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005966
Jiri Olsacf8517c2009-10-23 19:36:16 -04005967 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005968
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005969 return cnt;
5970}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05005971
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005972static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005973tracing_total_entries_read(struct file *filp, char __user *ubuf,
5974 size_t cnt, loff_t *ppos)
5975{
5976 struct trace_array *tr = filp->private_data;
5977 char buf[64];
5978 int r, cpu;
5979 unsigned long size = 0, expanded_size = 0;
5980
5981 mutex_lock(&trace_types_lock);
5982 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005983 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005984 if (!ring_buffer_expanded)
5985 expanded_size += trace_buf_size >> 10;
5986 }
5987 if (ring_buffer_expanded)
5988 r = sprintf(buf, "%lu\n", size);
5989 else
5990 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5991 mutex_unlock(&trace_types_lock);
5992
5993 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5994}
5995
5996static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005997tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5998 size_t cnt, loff_t *ppos)
5999{
6000 /*
6001 * There is no need to read what the user has written, this function
6002 * is just to make sure that there is no error when "echo" is used
6003 */
6004
6005 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006006
6007 return cnt;
6008}
6009
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006010static int
6011tracing_free_buffer_release(struct inode *inode, struct file *filp)
6012{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006013 struct trace_array *tr = inode->i_private;
6014
Steven Rostedtcf30cf62011-06-14 22:44:07 -04006015 /* disable tracing ? */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006016 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07006017 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006018 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006019 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006020
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006021 trace_array_put(tr);
6022
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006023 return 0;
6024}
6025
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006026static ssize_t
6027tracing_mark_write(struct file *filp, const char __user *ubuf,
6028 size_t cnt, loff_t *fpos)
6029{
Alexander Z Lam2d716192013-07-01 15:31:24 -07006030 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04006031 struct ring_buffer_event *event;
6032 struct ring_buffer *buffer;
6033 struct print_entry *entry;
6034 unsigned long irq_flags;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006035 const char faulted[] = "<faulted>";
Steven Rostedtd696b582011-09-22 11:50:27 -04006036 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006037 int size;
6038 int len;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006039
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006040/* Used in tracing_mark_raw_write() as well */
6041#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006042
Steven Rostedtc76f0692008-11-07 22:36:02 -05006043 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006044 return -EINVAL;
6045
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006046 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07006047 return -EINVAL;
6048
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006049 if (cnt > TRACE_BUF_SIZE)
6050 cnt = TRACE_BUF_SIZE;
6051
Steven Rostedtd696b582011-09-22 11:50:27 -04006052 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006053
Steven Rostedtd696b582011-09-22 11:50:27 -04006054 local_save_flags(irq_flags);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006055 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6056
6057 /* If less than "<faulted>", then make sure we can still add that */
6058 if (cnt < FAULTED_SIZE)
6059 size += FAULTED_SIZE - cnt;
6060
Alexander Z Lam2d716192013-07-01 15:31:24 -07006061 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006062 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6063 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006064 if (unlikely(!event))
Steven Rostedtd696b582011-09-22 11:50:27 -04006065 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006066 return -EBADF;
Steven Rostedtd696b582011-09-22 11:50:27 -04006067
6068 entry = ring_buffer_event_data(event);
6069 entry->ip = _THIS_IP_;
6070
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006071 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6072 if (len) {
6073 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6074 cnt = FAULTED_SIZE;
6075 written = -EFAULT;
Steven Rostedtd696b582011-09-22 11:50:27 -04006076 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006077 written = cnt;
6078 len = cnt;
Steven Rostedtd696b582011-09-22 11:50:27 -04006079
6080 if (entry->buf[cnt - 1] != '\n') {
6081 entry->buf[cnt] = '\n';
6082 entry->buf[cnt + 1] = '\0';
6083 } else
6084 entry->buf[cnt] = '\0';
6085
Steven Rostedt7ffbd482012-10-11 12:14:25 -04006086 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04006087
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006088 if (written > 0)
6089 *fpos += written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006090
Steven Rostedtfa32e852016-07-06 15:25:08 -04006091 return written;
6092}
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006093
Steven Rostedtfa32e852016-07-06 15:25:08 -04006094/* Limit it for now to 3K (including tag) */
6095#define RAW_DATA_MAX_SIZE (1024*3)
6096
6097static ssize_t
6098tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6099 size_t cnt, loff_t *fpos)
6100{
6101 struct trace_array *tr = filp->private_data;
6102 struct ring_buffer_event *event;
6103 struct ring_buffer *buffer;
6104 struct raw_data_entry *entry;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006105 const char faulted[] = "<faulted>";
Steven Rostedtfa32e852016-07-06 15:25:08 -04006106 unsigned long irq_flags;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006107 ssize_t written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006108 int size;
6109 int len;
6110
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006111#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6112
Steven Rostedtfa32e852016-07-06 15:25:08 -04006113 if (tracing_disabled)
6114 return -EINVAL;
6115
6116 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6117 return -EINVAL;
6118
6119 /* The marker must at least have a tag id */
6120 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6121 return -EINVAL;
6122
6123 if (cnt > TRACE_BUF_SIZE)
6124 cnt = TRACE_BUF_SIZE;
6125
6126 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6127
Steven Rostedtfa32e852016-07-06 15:25:08 -04006128 local_save_flags(irq_flags);
6129 size = sizeof(*entry) + cnt;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006130 if (cnt < FAULT_SIZE_ID)
6131 size += FAULT_SIZE_ID - cnt;
6132
Steven Rostedtfa32e852016-07-06 15:25:08 -04006133 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006134 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6135 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006136 if (!event)
Steven Rostedtfa32e852016-07-06 15:25:08 -04006137 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006138 return -EBADF;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006139
6140 entry = ring_buffer_event_data(event);
6141
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006142 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6143 if (len) {
6144 entry->id = -1;
6145 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6146 written = -EFAULT;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006147 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006148 written = cnt;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006149
6150 __buffer_unlock_commit(buffer, event);
6151
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006152 if (written > 0)
6153 *fpos += written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006154
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02006155 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006156}
6157
Li Zefan13f16d22009-12-08 11:16:11 +08006158static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08006159{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006160 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08006161 int i;
6162
6163 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08006164 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08006165 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006166 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6167 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08006168 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08006169
Li Zefan13f16d22009-12-08 11:16:11 +08006170 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08006171}
6172
Steven Rostedte1e232c2014-02-10 23:38:46 -05006173static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08006174{
Zhaolei5079f322009-08-25 16:12:56 +08006175 int i;
6176
Zhaolei5079f322009-08-25 16:12:56 +08006177 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6178 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6179 break;
6180 }
6181 if (i == ARRAY_SIZE(trace_clocks))
6182 return -EINVAL;
6183
Zhaolei5079f322009-08-25 16:12:56 +08006184 mutex_lock(&trace_types_lock);
6185
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006186 tr->clock_id = i;
6187
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006188 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08006189
David Sharp60303ed2012-10-11 16:27:52 -07006190 /*
6191 * New clock may not be consistent with the previous clock.
6192 * Reset the buffer so that it doesn't have incomparable timestamps.
6193 */
Alexander Z Lam94571582013-08-02 18:36:16 -07006194 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006195
6196#ifdef CONFIG_TRACER_MAX_TRACE
6197 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
6198 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07006199 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006200#endif
David Sharp60303ed2012-10-11 16:27:52 -07006201
Zhaolei5079f322009-08-25 16:12:56 +08006202 mutex_unlock(&trace_types_lock);
6203
Steven Rostedte1e232c2014-02-10 23:38:46 -05006204 return 0;
6205}
6206
6207static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6208 size_t cnt, loff_t *fpos)
6209{
6210 struct seq_file *m = filp->private_data;
6211 struct trace_array *tr = m->private;
6212 char buf[64];
6213 const char *clockstr;
6214 int ret;
6215
6216 if (cnt >= sizeof(buf))
6217 return -EINVAL;
6218
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08006219 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05006220 return -EFAULT;
6221
6222 buf[cnt] = 0;
6223
6224 clockstr = strstrip(buf);
6225
6226 ret = tracing_set_clock(tr, clockstr);
6227 if (ret)
6228 return ret;
6229
Zhaolei5079f322009-08-25 16:12:56 +08006230 *fpos += cnt;
6231
6232 return cnt;
6233}
6234
Li Zefan13f16d22009-12-08 11:16:11 +08006235static int tracing_clock_open(struct inode *inode, struct file *file)
6236{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006237 struct trace_array *tr = inode->i_private;
6238 int ret;
6239
Li Zefan13f16d22009-12-08 11:16:11 +08006240 if (tracing_disabled)
6241 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006242
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006243 if (trace_array_get(tr))
6244 return -ENODEV;
6245
6246 ret = single_open(file, tracing_clock_show, inode->i_private);
6247 if (ret < 0)
6248 trace_array_put(tr);
6249
6250 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08006251}
6252
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006253struct ftrace_buffer_info {
6254 struct trace_iterator iter;
6255 void *spare;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006256 unsigned int spare_cpu;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006257 unsigned int read;
6258};
6259
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006260#ifdef CONFIG_TRACER_SNAPSHOT
6261static int tracing_snapshot_open(struct inode *inode, struct file *file)
6262{
Oleg Nesterov6484c712013-07-23 17:26:10 +02006263 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006264 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006265 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006266 int ret = 0;
6267
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006268 if (trace_array_get(tr) < 0)
6269 return -ENODEV;
6270
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006271 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02006272 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006273 if (IS_ERR(iter))
6274 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006275 } else {
6276 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006277 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006278 m = kzalloc(sizeof(*m), GFP_KERNEL);
6279 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006280 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006281 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6282 if (!iter) {
6283 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006284 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006285 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006286 ret = 0;
6287
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006288 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02006289 iter->trace_buffer = &tr->max_buffer;
6290 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006291 m->private = iter;
6292 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006293 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006294out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006295 if (ret < 0)
6296 trace_array_put(tr);
6297
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006298 return ret;
6299}
6300
6301static ssize_t
6302tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6303 loff_t *ppos)
6304{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006305 struct seq_file *m = filp->private_data;
6306 struct trace_iterator *iter = m->private;
6307 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006308 unsigned long val;
6309 int ret;
6310
6311 ret = tracing_update_buffers();
6312 if (ret < 0)
6313 return ret;
6314
6315 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6316 if (ret)
6317 return ret;
6318
6319 mutex_lock(&trace_types_lock);
6320
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006321 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006322 ret = -EBUSY;
6323 goto out;
6324 }
6325
6326 switch (val) {
6327 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006328 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6329 ret = -EINVAL;
6330 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006331 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04006332 if (tr->allocated_snapshot)
6333 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006334 break;
6335 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006336/* Only allow per-cpu swap if the ring buffer supports it */
6337#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6338 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6339 ret = -EINVAL;
6340 break;
6341 }
6342#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05006343 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04006344 ret = alloc_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006345 if (ret < 0)
6346 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006347 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006348 local_irq_disable();
6349 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006350 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006351 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006352 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006353 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006354 local_irq_enable();
6355 break;
6356 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05006357 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006358 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6359 tracing_reset_online_cpus(&tr->max_buffer);
6360 else
6361 tracing_reset(&tr->max_buffer, iter->cpu_file);
6362 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006363 break;
6364 }
6365
6366 if (ret >= 0) {
6367 *ppos += cnt;
6368 ret = cnt;
6369 }
6370out:
6371 mutex_unlock(&trace_types_lock);
6372 return ret;
6373}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006374
6375static int tracing_snapshot_release(struct inode *inode, struct file *file)
6376{
6377 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006378 int ret;
6379
6380 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006381
6382 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006383 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006384
6385 /* If write only, the seq_file is just a stub */
6386 if (m)
6387 kfree(m->private);
6388 kfree(m);
6389
6390 return 0;
6391}
6392
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006393static int tracing_buffers_open(struct inode *inode, struct file *filp);
6394static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6395 size_t count, loff_t *ppos);
6396static int tracing_buffers_release(struct inode *inode, struct file *file);
6397static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6398 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6399
6400static int snapshot_raw_open(struct inode *inode, struct file *filp)
6401{
6402 struct ftrace_buffer_info *info;
6403 int ret;
6404
6405 ret = tracing_buffers_open(inode, filp);
6406 if (ret < 0)
6407 return ret;
6408
6409 info = filp->private_data;
6410
6411 if (info->iter.trace->use_max_tr) {
6412 tracing_buffers_release(inode, filp);
6413 return -EBUSY;
6414 }
6415
6416 info->iter.snapshot = true;
6417 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6418
6419 return ret;
6420}
6421
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006422#endif /* CONFIG_TRACER_SNAPSHOT */
6423
6424
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006425static const struct file_operations tracing_thresh_fops = {
6426 .open = tracing_open_generic,
6427 .read = tracing_thresh_read,
6428 .write = tracing_thresh_write,
6429 .llseek = generic_file_llseek,
6430};
6431
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04006432#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006433static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006434 .open = tracing_open_generic,
6435 .read = tracing_max_lat_read,
6436 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006437 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006438};
Chen Gange428abb2015-11-10 05:15:15 +08006439#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006440
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006441static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006442 .open = tracing_open_generic,
6443 .read = tracing_set_trace_read,
6444 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006445 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006446};
6447
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006448static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006449 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006450 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006451 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006452 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006453 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006454 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02006455};
6456
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006457static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006458 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02006459 .read = tracing_entries_read,
6460 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006461 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006462 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02006463};
6464
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006465static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006466 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006467 .read = tracing_total_entries_read,
6468 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006469 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006470};
6471
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006472static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006473 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006474 .write = tracing_free_buffer_write,
6475 .release = tracing_free_buffer_release,
6476};
6477
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006478static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006479 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006480 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006481 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006482 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006483};
6484
Steven Rostedtfa32e852016-07-06 15:25:08 -04006485static const struct file_operations tracing_mark_raw_fops = {
6486 .open = tracing_open_generic_tr,
6487 .write = tracing_mark_raw_write,
6488 .llseek = generic_file_llseek,
6489 .release = tracing_release_generic_tr,
6490};
6491
Zhaolei5079f322009-08-25 16:12:56 +08006492static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08006493 .open = tracing_clock_open,
6494 .read = seq_read,
6495 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006496 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08006497 .write = tracing_clock_write,
6498};
6499
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006500#ifdef CONFIG_TRACER_SNAPSHOT
6501static const struct file_operations snapshot_fops = {
6502 .open = tracing_snapshot_open,
6503 .read = seq_read,
6504 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05006505 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006506 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006507};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006508
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006509static const struct file_operations snapshot_raw_fops = {
6510 .open = snapshot_raw_open,
6511 .read = tracing_buffers_read,
6512 .release = tracing_buffers_release,
6513 .splice_read = tracing_buffers_splice_read,
6514 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006515};
6516
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006517#endif /* CONFIG_TRACER_SNAPSHOT */
6518
Steven Rostedt2cadf912008-12-01 22:20:19 -05006519static int tracing_buffers_open(struct inode *inode, struct file *filp)
6520{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006521 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006522 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006523 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006524
6525 if (tracing_disabled)
6526 return -ENODEV;
6527
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006528 if (trace_array_get(tr) < 0)
6529 return -ENODEV;
6530
Steven Rostedt2cadf912008-12-01 22:20:19 -05006531 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006532 if (!info) {
6533 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006534 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006535 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006536
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006537 mutex_lock(&trace_types_lock);
6538
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006539 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006540 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05006541 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006542 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006543 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006544 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006545 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006546
6547 filp->private_data = info;
6548
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006549 tr->current_trace->ref++;
6550
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006551 mutex_unlock(&trace_types_lock);
6552
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006553 ret = nonseekable_open(inode, filp);
6554 if (ret < 0)
6555 trace_array_put(tr);
6556
6557 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006558}
6559
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006560static unsigned int
6561tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6562{
6563 struct ftrace_buffer_info *info = filp->private_data;
6564 struct trace_iterator *iter = &info->iter;
6565
6566 return trace_poll(iter, filp, poll_table);
6567}
6568
Steven Rostedt2cadf912008-12-01 22:20:19 -05006569static ssize_t
6570tracing_buffers_read(struct file *filp, char __user *ubuf,
6571 size_t count, loff_t *ppos)
6572{
6573 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006574 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006575 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006576 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006577
Steven Rostedt2dc5d122009-03-04 19:10:05 -05006578 if (!count)
6579 return 0;
6580
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006581#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006582 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6583 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006584#endif
6585
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006586 if (!info->spare) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006587 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6588 iter->cpu_file);
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006589 info->spare_cpu = iter->cpu_file;
6590 }
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006591 if (!info->spare)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006592 return -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006593
Steven Rostedt2cadf912008-12-01 22:20:19 -05006594 /* Do we have previous read data to read? */
6595 if (info->read < PAGE_SIZE)
6596 goto read;
6597
Steven Rostedtb6273442013-02-28 13:44:11 -05006598 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006599 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006600 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006601 &info->spare,
6602 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006603 iter->cpu_file, 0);
6604 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05006605
6606 if (ret < 0) {
6607 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006608 if ((filp->f_flags & O_NONBLOCK))
6609 return -EAGAIN;
6610
Rabin Vincente30f53a2014-11-10 19:46:34 +01006611 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006612 if (ret)
6613 return ret;
6614
Steven Rostedtb6273442013-02-28 13:44:11 -05006615 goto again;
6616 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006617 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05006618 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006619
Steven Rostedt436fc282011-10-14 10:44:25 -04006620 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05006621 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05006622 size = PAGE_SIZE - info->read;
6623 if (size > count)
6624 size = count;
6625
6626 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006627 if (ret == size)
6628 return -EFAULT;
6629
Steven Rostedt2dc5d122009-03-04 19:10:05 -05006630 size -= ret;
6631
Steven Rostedt2cadf912008-12-01 22:20:19 -05006632 *ppos += size;
6633 info->read += size;
6634
6635 return size;
6636}
6637
6638static int tracing_buffers_release(struct inode *inode, struct file *file)
6639{
6640 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006641 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006642
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006643 mutex_lock(&trace_types_lock);
6644
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006645 iter->tr->current_trace->ref--;
6646
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006647 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006648
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006649 if (info->spare)
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006650 ring_buffer_free_read_page(iter->trace_buffer->buffer,
6651 info->spare_cpu, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006652 kfree(info);
6653
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006654 mutex_unlock(&trace_types_lock);
6655
Steven Rostedt2cadf912008-12-01 22:20:19 -05006656 return 0;
6657}
6658
6659struct buffer_ref {
6660 struct ring_buffer *buffer;
6661 void *page;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006662 int cpu;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006663 int ref;
6664};
6665
6666static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6667 struct pipe_buffer *buf)
6668{
6669 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6670
6671 if (--ref->ref)
6672 return;
6673
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006674 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006675 kfree(ref);
6676 buf->private = 0;
6677}
6678
Steven Rostedt2cadf912008-12-01 22:20:19 -05006679static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6680 struct pipe_buffer *buf)
6681{
6682 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6683
6684 ref->ref++;
6685}
6686
6687/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08006688static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05006689 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006690 .confirm = generic_pipe_buf_confirm,
6691 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09006692 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006693 .get = buffer_pipe_buf_get,
6694};
6695
6696/*
6697 * Callback from splice_to_pipe(), if we need to release some pages
6698 * at the end of the spd in case we error'ed out in filling the pipe.
6699 */
6700static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6701{
6702 struct buffer_ref *ref =
6703 (struct buffer_ref *)spd->partial[i].private;
6704
6705 if (--ref->ref)
6706 return;
6707
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006708 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006709 kfree(ref);
6710 spd->partial[i].private = 0;
6711}
6712
6713static ssize_t
6714tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6715 struct pipe_inode_info *pipe, size_t len,
6716 unsigned int flags)
6717{
6718 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006719 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02006720 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6721 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05006722 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02006723 .pages = pages_def,
6724 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02006725 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006726 .ops = &buffer_pipe_buf_ops,
6727 .spd_release = buffer_spd_release,
6728 };
6729 struct buffer_ref *ref;
Steven Rostedt93459c62009-04-29 00:23:13 -04006730 int entries, size, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01006731 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006732
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006733#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006734 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6735 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006736#endif
6737
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006738 if (*ppos & (PAGE_SIZE - 1))
6739 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08006740
6741 if (len & (PAGE_SIZE - 1)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006742 if (len < PAGE_SIZE)
6743 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08006744 len &= PAGE_MASK;
6745 }
6746
Al Viro1ae22932016-09-17 18:31:46 -04006747 if (splice_grow_spd(pipe, &spd))
6748 return -ENOMEM;
6749
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006750 again:
6751 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006752 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04006753
Al Viroa786c062014-04-11 12:01:03 -04006754 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05006755 struct page *page;
6756 int r;
6757
6758 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01006759 if (!ref) {
6760 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006761 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01006762 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006763
Steven Rostedt7267fa62009-04-29 00:16:21 -04006764 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006765 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006766 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006767 if (!ref->page) {
Rabin Vincent07906da2014-11-06 22:26:07 +01006768 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006769 kfree(ref);
6770 break;
6771 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006772 ref->cpu = iter->cpu_file;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006773
6774 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006775 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006776 if (r < 0) {
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006777 ring_buffer_free_read_page(ref->buffer, ref->cpu,
6778 ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006779 kfree(ref);
6780 break;
6781 }
6782
6783 /*
6784 * zero out any left over data, this is going to
6785 * user land.
6786 */
6787 size = ring_buffer_page_len(ref->page);
6788 if (size < PAGE_SIZE)
6789 memset(ref->page + size, 0, PAGE_SIZE - size);
6790
6791 page = virt_to_page(ref->page);
6792
6793 spd.pages[i] = page;
6794 spd.partial[i].len = PAGE_SIZE;
6795 spd.partial[i].offset = 0;
6796 spd.partial[i].private = (unsigned long)ref;
6797 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08006798 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04006799
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006800 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006801 }
6802
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006803 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006804 spd.nr_pages = i;
6805
6806 /* did we read anything? */
6807 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01006808 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04006809 goto out;
Rabin Vincent07906da2014-11-06 22:26:07 +01006810
Al Viro1ae22932016-09-17 18:31:46 -04006811 ret = -EAGAIN;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006812 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
Al Viro1ae22932016-09-17 18:31:46 -04006813 goto out;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006814
Rabin Vincente30f53a2014-11-10 19:46:34 +01006815 ret = wait_on_pipe(iter, true);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006816 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04006817 goto out;
Rabin Vincente30f53a2014-11-10 19:46:34 +01006818
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006819 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006820 }
6821
6822 ret = splice_to_pipe(pipe, &spd);
Al Viro1ae22932016-09-17 18:31:46 -04006823out:
Eric Dumazet047fe362012-06-12 15:24:40 +02006824 splice_shrink_spd(&spd);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006825
Steven Rostedt2cadf912008-12-01 22:20:19 -05006826 return ret;
6827}
6828
6829static const struct file_operations tracing_buffers_fops = {
6830 .open = tracing_buffers_open,
6831 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006832 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006833 .release = tracing_buffers_release,
6834 .splice_read = tracing_buffers_splice_read,
6835 .llseek = no_llseek,
6836};
6837
Steven Rostedtc8d77182009-04-29 18:03:45 -04006838static ssize_t
6839tracing_stats_read(struct file *filp, char __user *ubuf,
6840 size_t count, loff_t *ppos)
6841{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006842 struct inode *inode = file_inode(filp);
6843 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006844 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006845 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006846 struct trace_seq *s;
6847 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006848 unsigned long long t;
6849 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04006850
Li Zefane4f2d102009-06-15 10:57:28 +08006851 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006852 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01006853 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04006854
6855 trace_seq_init(s);
6856
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006857 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006858 trace_seq_printf(s, "entries: %ld\n", cnt);
6859
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006860 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006861 trace_seq_printf(s, "overrun: %ld\n", cnt);
6862
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006863 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006864 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
6865
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006866 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006867 trace_seq_printf(s, "bytes: %ld\n", cnt);
6868
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09006869 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006870 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006871 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006872 usec_rem = do_div(t, USEC_PER_SEC);
6873 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
6874 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006875
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006876 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006877 usec_rem = do_div(t, USEC_PER_SEC);
6878 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
6879 } else {
6880 /* counter or tsc mode for trace_clock */
6881 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006882 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006883
6884 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006885 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006886 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006887
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006888 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07006889 trace_seq_printf(s, "dropped events: %ld\n", cnt);
6890
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006891 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05006892 trace_seq_printf(s, "read events: %ld\n", cnt);
6893
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006894 count = simple_read_from_buffer(ubuf, count, ppos,
6895 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04006896
6897 kfree(s);
6898
6899 return count;
6900}
6901
6902static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006903 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04006904 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006905 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006906 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04006907};
6908
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006909#ifdef CONFIG_DYNAMIC_FTRACE
6910
6911static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006912tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006913 size_t cnt, loff_t *ppos)
6914{
6915 unsigned long *p = filp->private_data;
Steven Rostedt (VMware)6a9c9812017-06-27 11:02:49 -04006916 char buf[64]; /* Not too big for a shallow stack */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006917 int r;
6918
Steven Rostedt (VMware)6a9c9812017-06-27 11:02:49 -04006919 r = scnprintf(buf, 63, "%ld", *p);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006920 buf[r++] = '\n';
6921
Steven Rostedt (VMware)6a9c9812017-06-27 11:02:49 -04006922 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006923}
6924
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006925static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006926 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006927 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006928 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006929};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006930#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006931
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006932#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6933static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04006934ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04006935 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04006936 void *data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006937{
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04006938 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006939}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006940
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006941static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04006942ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04006943 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04006944 void *data)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006945{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04006946 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006947 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006948
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006949 if (mapper)
6950 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006951
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006952 if (count) {
6953
6954 if (*count <= 0)
6955 return;
6956
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006957 (*count)--;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006958 }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006959
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04006960 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006961}
6962
6963static int
6964ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
6965 struct ftrace_probe_ops *ops, void *data)
6966{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04006967 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006968 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006969
6970 seq_printf(m, "%ps:", (void *)ip);
6971
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01006972 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006973
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006974 if (mapper)
6975 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
6976
6977 if (count)
6978 seq_printf(m, ":count=%ld\n", *count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006979 else
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006980 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006981
6982 return 0;
6983}
6984
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006985static int
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04006986ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04006987 unsigned long ip, void *init_data, void **data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006988{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04006989 struct ftrace_func_mapper *mapper = *data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006990
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04006991 if (!mapper) {
6992 mapper = allocate_ftrace_func_mapper();
6993 if (!mapper)
6994 return -ENOMEM;
6995 *data = mapper;
6996 }
6997
6998 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04006999}
7000
7001static void
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007002ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007003 unsigned long ip, void *data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007004{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007005 struct ftrace_func_mapper *mapper = data;
7006
7007 if (!ip) {
7008 if (!mapper)
7009 return;
7010 free_ftrace_func_mapper(mapper, NULL);
7011 return;
7012 }
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007013
7014 ftrace_func_mapper_remove_ip(mapper, ip);
7015}
7016
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007017static struct ftrace_probe_ops snapshot_probe_ops = {
7018 .func = ftrace_snapshot,
7019 .print = ftrace_snapshot_print,
7020};
7021
7022static struct ftrace_probe_ops snapshot_count_probe_ops = {
7023 .func = ftrace_count_snapshot,
7024 .print = ftrace_snapshot_print,
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007025 .init = ftrace_snapshot_init,
7026 .free = ftrace_snapshot_free,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007027};
7028
7029static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04007030ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007031 char *glob, char *cmd, char *param, int enable)
7032{
7033 struct ftrace_probe_ops *ops;
7034 void *count = (void *)-1;
7035 char *number;
7036 int ret;
7037
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -04007038 if (!tr)
7039 return -ENODEV;
7040
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007041 /* hash funcs only work with set_ftrace_filter */
7042 if (!enable)
7043 return -EINVAL;
7044
7045 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7046
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04007047 if (glob[0] == '!')
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04007048 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007049
7050 if (!param)
7051 goto out_reg;
7052
7053 number = strsep(&param, ":");
7054
7055 if (!strlen(number))
7056 goto out_reg;
7057
7058 /*
7059 * We use the callback data field (which is a pointer)
7060 * as our counter.
7061 */
7062 ret = kstrtoul(number, 0, (unsigned long *)&count);
7063 if (ret)
7064 return ret;
7065
7066 out_reg:
Linus Torvalds4c174682017-05-03 18:41:21 -07007067 ret = alloc_snapshot(tr);
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04007068 if (ret < 0)
7069 goto out;
7070
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04007071 ret = register_ftrace_function_probe(glob, tr, ops, count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007072
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04007073 out:
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007074 return ret < 0 ? ret : 0;
7075}
7076
7077static struct ftrace_func_command ftrace_snapshot_cmd = {
7078 .name = "snapshot",
7079 .func = ftrace_trace_snapshot_callback,
7080};
7081
Tom Zanussi38de93a2013-10-24 08:34:18 -05007082static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007083{
7084 return register_ftrace_command(&ftrace_snapshot_cmd);
7085}
7086#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05007087static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007088#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007089
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007090static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007091{
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007092 if (WARN_ON(!tr->dir))
7093 return ERR_PTR(-ENODEV);
7094
7095 /* Top directory uses NULL as the parent */
7096 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7097 return NULL;
7098
7099 /* All sub buffers have a descriptor */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007100 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007101}
7102
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007103static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7104{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007105 struct dentry *d_tracer;
7106
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007107 if (tr->percpu_dir)
7108 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007109
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007110 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05007111 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007112 return NULL;
7113
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007114 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007115
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007116 WARN_ONCE(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007117 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007118
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007119 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007120}
7121
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007122static struct dentry *
7123trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7124 void *data, long cpu, const struct file_operations *fops)
7125{
7126 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7127
7128 if (ret) /* See tracing_get_cpu() */
David Howells7682c912015-03-17 22:26:16 +00007129 d_inode(ret)->i_cdev = (void *)(cpu + 1);
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007130 return ret;
7131}
7132
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007133static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007134tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007135{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007136 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007137 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04007138 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007139
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09007140 if (!d_percpu)
7141 return;
7142
Steven Rostedtdd49a382010-10-20 21:51:26 -04007143 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007144 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01007145 if (!d_cpu) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07007146 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01007147 return;
7148 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007149
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01007150 /* per cpu trace_pipe */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007151 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02007152 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007153
7154 /* per cpu trace */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007155 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007156 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04007157
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007158 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007159 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007160
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007161 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007162 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08007163
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007164 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007165 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007166
7167#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007168 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007169 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007170
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007171 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007172 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007173#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007174}
7175
Steven Rostedt60a11772008-05-12 21:20:44 +02007176#ifdef CONFIG_FTRACE_SELFTEST
7177/* Let selftest have access to static functions in this file */
7178#include "trace_selftest.c"
7179#endif
7180
Steven Rostedt577b7852009-02-26 23:43:05 -05007181static ssize_t
7182trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7183 loff_t *ppos)
7184{
7185 struct trace_option_dentry *topt = filp->private_data;
7186 char *buf;
7187
7188 if (topt->flags->val & topt->opt->bit)
7189 buf = "1\n";
7190 else
7191 buf = "0\n";
7192
7193 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7194}
7195
7196static ssize_t
7197trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7198 loff_t *ppos)
7199{
7200 struct trace_option_dentry *topt = filp->private_data;
7201 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05007202 int ret;
7203
Peter Huewe22fe9b52011-06-07 21:58:27 +02007204 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7205 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05007206 return ret;
7207
Li Zefan8d18eaa2009-12-08 11:17:06 +08007208 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05007209 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08007210
7211 if (!!(topt->flags->val & topt->opt->bit) != val) {
7212 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05007213 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05007214 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08007215 mutex_unlock(&trace_types_lock);
7216 if (ret)
7217 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05007218 }
7219
7220 *ppos += cnt;
7221
7222 return cnt;
7223}
7224
7225
7226static const struct file_operations trace_options_fops = {
7227 .open = tracing_open_generic,
7228 .read = trace_options_read,
7229 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007230 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05007231};
7232
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007233/*
7234 * In order to pass in both the trace_array descriptor as well as the index
7235 * to the flag that the trace option file represents, the trace_array
7236 * has a character array of trace_flags_index[], which holds the index
7237 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7238 * The address of this character array is passed to the flag option file
7239 * read/write callbacks.
7240 *
7241 * In order to extract both the index and the trace_array descriptor,
7242 * get_tr_index() uses the following algorithm.
7243 *
7244 * idx = *ptr;
7245 *
7246 * As the pointer itself contains the address of the index (remember
7247 * index[1] == 1).
7248 *
7249 * Then to get the trace_array descriptor, by subtracting that index
7250 * from the ptr, we get to the start of the index itself.
7251 *
7252 * ptr - idx == &index[0]
7253 *
7254 * Then a simple container_of() from that pointer gets us to the
7255 * trace_array descriptor.
7256 */
7257static void get_tr_index(void *data, struct trace_array **ptr,
7258 unsigned int *pindex)
7259{
7260 *pindex = *(unsigned char *)data;
7261
7262 *ptr = container_of(data - *pindex, struct trace_array,
7263 trace_flags_index);
7264}
7265
Steven Rostedta8259072009-02-26 22:19:12 -05007266static ssize_t
7267trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7268 loff_t *ppos)
7269{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007270 void *tr_index = filp->private_data;
7271 struct trace_array *tr;
7272 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05007273 char *buf;
7274
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007275 get_tr_index(tr_index, &tr, &index);
7276
7277 if (tr->trace_flags & (1 << index))
Steven Rostedta8259072009-02-26 22:19:12 -05007278 buf = "1\n";
7279 else
7280 buf = "0\n";
7281
7282 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7283}
7284
7285static ssize_t
7286trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7287 loff_t *ppos)
7288{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007289 void *tr_index = filp->private_data;
7290 struct trace_array *tr;
7291 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05007292 unsigned long val;
7293 int ret;
7294
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007295 get_tr_index(tr_index, &tr, &index);
7296
Peter Huewe22fe9b52011-06-07 21:58:27 +02007297 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7298 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05007299 return ret;
7300
Zhaoleif2d84b62009-08-07 18:55:48 +08007301 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05007302 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04007303
7304 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007305 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04007306 mutex_unlock(&trace_types_lock);
Steven Rostedta8259072009-02-26 22:19:12 -05007307
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04007308 if (ret < 0)
7309 return ret;
7310
Steven Rostedta8259072009-02-26 22:19:12 -05007311 *ppos += cnt;
7312
7313 return cnt;
7314}
7315
Steven Rostedta8259072009-02-26 22:19:12 -05007316static const struct file_operations trace_options_core_fops = {
7317 .open = tracing_open_generic,
7318 .read = trace_options_core_read,
7319 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007320 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05007321};
7322
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007323struct dentry *trace_create_file(const char *name,
Al Virof4ae40a62011-07-24 04:33:43 -04007324 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007325 struct dentry *parent,
7326 void *data,
7327 const struct file_operations *fops)
7328{
7329 struct dentry *ret;
7330
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007331 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007332 if (!ret)
Joe Perchesa395d6a2016-03-22 14:28:09 -07007333 pr_warn("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007334
7335 return ret;
7336}
7337
7338
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007339static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05007340{
7341 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05007342
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007343 if (tr->options)
7344 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05007345
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007346 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05007347 if (IS_ERR(d_tracer))
Steven Rostedta8259072009-02-26 22:19:12 -05007348 return NULL;
7349
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007350 tr->options = tracefs_create_dir("options", d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007351 if (!tr->options) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07007352 pr_warn("Could not create tracefs directory 'options'\n");
Steven Rostedta8259072009-02-26 22:19:12 -05007353 return NULL;
7354 }
7355
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007356 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05007357}
7358
Steven Rostedt577b7852009-02-26 23:43:05 -05007359static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007360create_trace_option_file(struct trace_array *tr,
7361 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05007362 struct tracer_flags *flags,
7363 struct tracer_opt *opt)
7364{
7365 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05007366
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007367 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05007368 if (!t_options)
7369 return;
7370
7371 topt->flags = flags;
7372 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007373 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05007374
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007375 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05007376 &trace_options_fops);
7377
Steven Rostedt577b7852009-02-26 23:43:05 -05007378}
7379
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007380static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007381create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05007382{
7383 struct trace_option_dentry *topts;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007384 struct trace_options *tr_topts;
Steven Rostedt577b7852009-02-26 23:43:05 -05007385 struct tracer_flags *flags;
7386 struct tracer_opt *opts;
7387 int cnt;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007388 int i;
Steven Rostedt577b7852009-02-26 23:43:05 -05007389
7390 if (!tracer)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007391 return;
Steven Rostedt577b7852009-02-26 23:43:05 -05007392
7393 flags = tracer->flags;
7394
7395 if (!flags || !flags->opts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007396 return;
7397
7398 /*
7399 * If this is an instance, only create flags for tracers
7400 * the instance may have.
7401 */
7402 if (!trace_ok_for_array(tracer, tr))
7403 return;
7404
7405 for (i = 0; i < tr->nr_topts; i++) {
Chunyu Hud39cdd22016-03-08 21:37:01 +08007406 /* Make sure there's no duplicate flags. */
7407 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007408 return;
7409 }
Steven Rostedt577b7852009-02-26 23:43:05 -05007410
7411 opts = flags->opts;
7412
7413 for (cnt = 0; opts[cnt].name; cnt++)
7414 ;
7415
Steven Rostedt0cfe8242009-02-27 10:51:10 -05007416 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05007417 if (!topts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007418 return;
7419
7420 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7421 GFP_KERNEL);
7422 if (!tr_topts) {
7423 kfree(topts);
7424 return;
7425 }
7426
7427 tr->topts = tr_topts;
7428 tr->topts[tr->nr_topts].tracer = tracer;
7429 tr->topts[tr->nr_topts].topts = topts;
7430 tr->nr_topts++;
Steven Rostedt577b7852009-02-26 23:43:05 -05007431
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04007432 for (cnt = 0; opts[cnt].name; cnt++) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007433 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05007434 &opts[cnt]);
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04007435 WARN_ONCE(topts[cnt].entry == NULL,
7436 "Failed to create trace option: %s",
7437 opts[cnt].name);
7438 }
Steven Rostedt577b7852009-02-26 23:43:05 -05007439}
7440
Steven Rostedta8259072009-02-26 22:19:12 -05007441static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007442create_trace_option_core_file(struct trace_array *tr,
7443 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05007444{
7445 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05007446
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007447 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05007448 if (!t_options)
7449 return NULL;
7450
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007451 return trace_create_file(option, 0644, t_options,
7452 (void *)&tr->trace_flags_index[index],
7453 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05007454}
7455
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007456static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05007457{
7458 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007459 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05007460 int i;
7461
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007462 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05007463 if (!t_options)
7464 return;
7465
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007466 for (i = 0; trace_options[i]; i++) {
7467 if (top_level ||
7468 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7469 create_trace_option_core_file(tr, trace_options[i], i);
7470 }
Steven Rostedta8259072009-02-26 22:19:12 -05007471}
7472
Steven Rostedt499e5472012-02-22 15:50:28 -05007473static ssize_t
7474rb_simple_read(struct file *filp, char __user *ubuf,
7475 size_t cnt, loff_t *ppos)
7476{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04007477 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05007478 char buf[64];
7479 int r;
7480
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04007481 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05007482 r = sprintf(buf, "%d\n", r);
7483
7484 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7485}
7486
7487static ssize_t
7488rb_simple_write(struct file *filp, const char __user *ubuf,
7489 size_t cnt, loff_t *ppos)
7490{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04007491 struct trace_array *tr = filp->private_data;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007492 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05007493 unsigned long val;
7494 int ret;
7495
7496 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7497 if (ret)
7498 return ret;
7499
7500 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05007501 mutex_lock(&trace_types_lock);
7502 if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04007503 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007504 if (tr->current_trace->start)
7505 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05007506 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04007507 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007508 if (tr->current_trace->stop)
7509 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05007510 }
7511 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05007512 }
7513
7514 (*ppos)++;
7515
7516 return cnt;
7517}
7518
7519static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007520 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05007521 .read = rb_simple_read,
7522 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007523 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05007524 .llseek = default_llseek,
7525};
7526
Steven Rostedt277ba042012-08-03 16:10:49 -04007527struct dentry *trace_instance_dir;
7528
7529static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007530init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
Steven Rostedt277ba042012-08-03 16:10:49 -04007531
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007532static int
7533allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04007534{
7535 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007536
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007537 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007538
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05007539 buf->tr = tr;
7540
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007541 buf->buffer = ring_buffer_alloc(size, rb_flags);
7542 if (!buf->buffer)
7543 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007544
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007545 buf->data = alloc_percpu(struct trace_array_cpu);
7546 if (!buf->data) {
7547 ring_buffer_free(buf->buffer);
7548 return -ENOMEM;
7549 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007550
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007551 /* Allocate the first page for all buffers */
7552 set_buffer_entries(&tr->trace_buffer,
7553 ring_buffer_size(tr->trace_buffer.buffer, 0));
7554
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007555 return 0;
7556}
7557
7558static int allocate_trace_buffers(struct trace_array *tr, int size)
7559{
7560 int ret;
7561
7562 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7563 if (ret)
7564 return ret;
7565
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007566#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007567 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7568 allocate_snapshot ? size : 1);
7569 if (WARN_ON(ret)) {
7570 ring_buffer_free(tr->trace_buffer.buffer);
7571 free_percpu(tr->trace_buffer.data);
7572 return -ENOMEM;
7573 }
7574 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007575
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007576 /*
7577 * Only the top level trace array gets its snapshot allocated
7578 * from the kernel command line.
7579 */
7580 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007581#endif
7582 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007583}
7584
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04007585static void free_trace_buffer(struct trace_buffer *buf)
7586{
7587 if (buf->buffer) {
7588 ring_buffer_free(buf->buffer);
7589 buf->buffer = NULL;
7590 free_percpu(buf->data);
7591 buf->data = NULL;
7592 }
7593}
7594
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007595static void free_trace_buffers(struct trace_array *tr)
7596{
7597 if (!tr)
7598 return;
7599
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04007600 free_trace_buffer(&tr->trace_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007601
7602#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04007603 free_trace_buffer(&tr->max_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007604#endif
7605}
7606
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007607static void init_trace_flags_index(struct trace_array *tr)
7608{
7609 int i;
7610
7611 /* Used by the trace options files */
7612 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7613 tr->trace_flags_index[i] = i;
7614}
7615
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007616static void __update_tracer_options(struct trace_array *tr)
7617{
7618 struct tracer *t;
7619
7620 for (t = trace_types; t; t = t->next)
7621 add_tracer_options(tr, t);
7622}
7623
7624static void update_tracer_options(struct trace_array *tr)
7625{
7626 mutex_lock(&trace_types_lock);
7627 __update_tracer_options(tr);
7628 mutex_unlock(&trace_types_lock);
7629}
7630
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05007631static int instance_mkdir(const char *name)
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007632{
Steven Rostedt277ba042012-08-03 16:10:49 -04007633 struct trace_array *tr;
7634 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04007635
7636 mutex_lock(&trace_types_lock);
7637
7638 ret = -EEXIST;
7639 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7640 if (tr->name && strcmp(tr->name, name) == 0)
7641 goto out_unlock;
7642 }
7643
7644 ret = -ENOMEM;
7645 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7646 if (!tr)
7647 goto out_unlock;
7648
7649 tr->name = kstrdup(name, GFP_KERNEL);
7650 if (!tr->name)
7651 goto out_free_tr;
7652
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007653 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7654 goto out_free_tr;
7655
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04007656 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007657
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007658 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7659
Steven Rostedt277ba042012-08-03 16:10:49 -04007660 raw_spin_lock_init(&tr->start_lock);
7661
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05007662 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7663
Steven Rostedt277ba042012-08-03 16:10:49 -04007664 tr->current_trace = &nop_trace;
7665
7666 INIT_LIST_HEAD(&tr->systems);
7667 INIT_LIST_HEAD(&tr->events);
7668
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007669 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04007670 goto out_free_tr;
7671
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007672 tr->dir = tracefs_create_dir(name, trace_instance_dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04007673 if (!tr->dir)
7674 goto out_free_tr;
7675
7676 ret = event_trace_add_tracer(tr->dir, tr);
Alexander Z Lam609e85a2013-07-10 17:34:34 -07007677 if (ret) {
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007678 tracefs_remove_recursive(tr->dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04007679 goto out_free_tr;
Alexander Z Lam609e85a2013-07-10 17:34:34 -07007680 }
Steven Rostedt277ba042012-08-03 16:10:49 -04007681
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04007682 ftrace_init_trace_array(tr);
7683
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007684 init_tracer_tracefs(tr, tr->dir);
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007685 init_trace_flags_index(tr);
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007686 __update_tracer_options(tr);
Steven Rostedt277ba042012-08-03 16:10:49 -04007687
7688 list_add(&tr->list, &ftrace_trace_arrays);
7689
7690 mutex_unlock(&trace_types_lock);
7691
7692 return 0;
7693
7694 out_free_tr:
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007695 free_trace_buffers(tr);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007696 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04007697 kfree(tr->name);
7698 kfree(tr);
7699
7700 out_unlock:
7701 mutex_unlock(&trace_types_lock);
7702
7703 return ret;
7704
7705}
7706
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05007707static int instance_rmdir(const char *name)
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007708{
7709 struct trace_array *tr;
7710 int found = 0;
7711 int ret;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007712 int i;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007713
7714 mutex_lock(&trace_types_lock);
7715
7716 ret = -ENODEV;
7717 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7718 if (tr->name && strcmp(tr->name, name) == 0) {
7719 found = 1;
7720 break;
7721 }
7722 }
7723 if (!found)
7724 goto out_unlock;
7725
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007726 ret = -EBUSY;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007727 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007728 goto out_unlock;
7729
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007730 list_del(&tr->list);
7731
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04007732 /* Disable all the flags that were enabled coming in */
7733 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7734 if ((1 << i) & ZEROED_TRACE_FLAGS)
7735 set_tracer_flag(tr, 1 << i, 0);
7736 }
7737
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05007738 tracing_set_nop(tr);
Naveen N. Raoa0e63692017-05-16 23:21:26 +05307739 clear_ftrace_function_probes(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007740 event_trace_del_tracer(tr);
Namhyung Kimd879d0b2017-04-17 11:44:27 +09007741 ftrace_clear_pids(tr);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05007742 ftrace_destroy_function_files(tr);
Jiaxing Wang681a4a22015-10-18 19:58:08 +08007743 tracefs_remove_recursive(tr->dir);
Steven Rostedt (Red Hat)a9fcaaa2014-06-06 23:17:28 -04007744 free_trace_buffers(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007745
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007746 for (i = 0; i < tr->nr_topts; i++) {
7747 kfree(tr->topts[i].topts);
7748 }
7749 kfree(tr->topts);
7750
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007751 kfree(tr->name);
7752 kfree(tr);
7753
7754 ret = 0;
7755
7756 out_unlock:
7757 mutex_unlock(&trace_types_lock);
7758
7759 return ret;
7760}
7761
Steven Rostedt277ba042012-08-03 16:10:49 -04007762static __init void create_trace_instances(struct dentry *d_tracer)
7763{
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05007764 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7765 instance_mkdir,
7766 instance_rmdir);
Steven Rostedt277ba042012-08-03 16:10:49 -04007767 if (WARN_ON(!trace_instance_dir))
7768 return;
Steven Rostedt277ba042012-08-03 16:10:49 -04007769}
7770
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007771static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007772init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007773{
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05007774 int cpu;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007775
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05007776 trace_create_file("available_tracers", 0444, d_tracer,
7777 tr, &show_traces_fops);
7778
7779 trace_create_file("current_tracer", 0644, d_tracer,
7780 tr, &set_tracer_fops);
7781
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007782 trace_create_file("tracing_cpumask", 0644, d_tracer,
7783 tr, &tracing_cpumask_fops);
7784
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007785 trace_create_file("trace_options", 0644, d_tracer,
7786 tr, &tracing_iter_fops);
7787
7788 trace_create_file("trace", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007789 tr, &tracing_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007790
7791 trace_create_file("trace_pipe", 0444, d_tracer,
Oleg Nesterov15544202013-07-23 17:25:57 +02007792 tr, &tracing_pipe_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007793
7794 trace_create_file("buffer_size_kb", 0644, d_tracer,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007795 tr, &tracing_entries_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007796
7797 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
7798 tr, &tracing_total_entries_fops);
7799
Wang YanQing238ae932013-05-26 16:52:01 +08007800 trace_create_file("free_buffer", 0200, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007801 tr, &tracing_free_buffer_fops);
7802
7803 trace_create_file("trace_marker", 0220, d_tracer,
7804 tr, &tracing_mark_fops);
7805
Steven Rostedtfa32e852016-07-06 15:25:08 -04007806 trace_create_file("trace_marker_raw", 0220, d_tracer,
7807 tr, &tracing_mark_raw_fops);
7808
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007809 trace_create_file("trace_clock", 0644, d_tracer, tr,
7810 &trace_clock_fops);
7811
7812 trace_create_file("tracing_on", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007813 tr, &rb_simple_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05007814
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007815 create_trace_options_dir(tr);
7816
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04007817#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05007818 trace_create_file("tracing_max_latency", 0644, d_tracer,
7819 &tr->max_latency, &tracing_max_lat_fops);
7820#endif
7821
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05007822 if (ftrace_create_function_files(tr, d_tracer))
7823 WARN(1, "Could not allocate function filter files");
7824
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05007825#ifdef CONFIG_TRACER_SNAPSHOT
7826 trace_create_file("snapshot", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007827 tr, &snapshot_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05007828#endif
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05007829
7830 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007831 tracing_init_tracefs_percpu(tr, cpu);
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05007832
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04007833 ftrace_init_tracefs(tr, d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007834}
7835
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13007836static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007837{
7838 struct vfsmount *mnt;
7839 struct file_system_type *type;
7840
7841 /*
7842 * To maintain backward compatibility for tools that mount
7843 * debugfs to get to the tracing facility, tracefs is automatically
7844 * mounted to the debugfs/tracing directory.
7845 */
7846 type = get_fs_type("tracefs");
7847 if (!type)
7848 return NULL;
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13007849 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007850 put_filesystem(type);
7851 if (IS_ERR(mnt))
7852 return NULL;
7853 mntget(mnt);
7854
7855 return mnt;
7856}
7857
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007858/**
7859 * tracing_init_dentry - initialize top level trace array
7860 *
7861 * This is called when creating files or directories in the tracing
7862 * directory. It is called via fs_initcall() by any of the boot up code
7863 * and expects to return the dentry of the top level tracing directory.
7864 */
7865struct dentry *tracing_init_dentry(void)
7866{
7867 struct trace_array *tr = &global_trace;
7868
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007869 /* The top level trace array uses NULL as parent */
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007870 if (tr->dir)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007871 return NULL;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007872
Jiaxing Wang8b129192015-11-06 16:04:16 +08007873 if (WARN_ON(!tracefs_initialized()) ||
7874 (IS_ENABLED(CONFIG_DEBUG_FS) &&
7875 WARN_ON(!debugfs_initialized())))
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007876 return ERR_PTR(-ENODEV);
7877
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007878 /*
7879 * As there may still be users that expect the tracing
7880 * files to exist in debugfs/tracing, we must automount
7881 * the tracefs file system there, so older tools still
7882 * work with the newer kerenl.
7883 */
7884 tr->dir = debugfs_create_automount("tracing", NULL,
7885 trace_automount, NULL);
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007886 if (!tr->dir) {
7887 pr_warn_once("Could not create debugfs directory 'tracing'\n");
7888 return ERR_PTR(-ENOMEM);
7889 }
7890
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007891 return NULL;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007892}
7893
Jeremy Linton00f4b652017-05-31 16:56:43 -05007894extern struct trace_eval_map *__start_ftrace_eval_maps[];
7895extern struct trace_eval_map *__stop_ftrace_eval_maps[];
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04007896
Jeremy Linton5f60b352017-05-31 16:56:47 -05007897static void __init trace_eval_init(void)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04007898{
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007899 int len;
7900
Jeremy Linton02fd7f62017-05-31 16:56:42 -05007901 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05007902 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04007903}
7904
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007905#ifdef CONFIG_MODULES
Jeremy Lintonf57a4142017-05-31 16:56:48 -05007906static void trace_module_add_evals(struct module *mod)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007907{
Jeremy Linton99be6472017-05-31 16:56:44 -05007908 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007909 return;
7910
7911 /*
7912 * Modules with bad taint do not have events created, do
7913 * not bother with enums either.
7914 */
7915 if (trace_module_has_bad_taint(mod))
7916 return;
7917
Jeremy Lintonf57a4142017-05-31 16:56:48 -05007918 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007919}
7920
Jeremy Linton681bec02017-05-31 16:56:53 -05007921#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Lintonf57a4142017-05-31 16:56:48 -05007922static void trace_module_remove_evals(struct module *mod)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007923{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05007924 union trace_eval_map_item *map;
7925 union trace_eval_map_item **last = &trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007926
Jeremy Linton99be6472017-05-31 16:56:44 -05007927 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007928 return;
7929
Jeremy Linton1793ed92017-05-31 16:56:46 -05007930 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007931
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05007932 map = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007933
7934 while (map) {
7935 if (map->head.mod == mod)
7936 break;
Jeremy Linton5f60b352017-05-31 16:56:47 -05007937 map = trace_eval_jmp_to_tail(map);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007938 last = &map->tail.next;
7939 map = map->tail.next;
7940 }
7941 if (!map)
7942 goto out;
7943
Jeremy Linton5f60b352017-05-31 16:56:47 -05007944 *last = trace_eval_jmp_to_tail(map)->tail.next;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007945 kfree(map);
7946 out:
Jeremy Linton1793ed92017-05-31 16:56:46 -05007947 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007948}
7949#else
Jeremy Lintonf57a4142017-05-31 16:56:48 -05007950static inline void trace_module_remove_evals(struct module *mod) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05007951#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007952
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007953static int trace_module_notify(struct notifier_block *self,
7954 unsigned long val, void *data)
7955{
7956 struct module *mod = data;
7957
7958 switch (val) {
7959 case MODULE_STATE_COMING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05007960 trace_module_add_evals(mod);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007961 break;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007962 case MODULE_STATE_GOING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05007963 trace_module_remove_evals(mod);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007964 break;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007965 }
7966
7967 return 0;
7968}
7969
7970static struct notifier_block trace_module_nb = {
7971 .notifier_call = trace_module_notify,
7972 .priority = 0,
7973};
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007974#endif /* CONFIG_MODULES */
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007975
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007976static __init int tracer_init_tracefs(void)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007977{
7978 struct dentry *d_tracer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007979
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08007980 trace_access_lock_init();
7981
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007982 d_tracer = tracing_init_dentry();
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05007983 if (IS_ERR(d_tracer))
Namhyung Kimed6f1c92013-04-10 09:18:12 +09007984 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007985
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007986 init_tracer_tracefs(&global_trace, d_tracer);
Steven Rostedt (Red Hat)501c2372016-07-05 10:04:34 -04007987 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007988
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007989 trace_create_file("tracing_thresh", 0644, d_tracer,
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04007990 &global_trace, &tracing_thresh_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007991
Li Zefan339ae5d2009-04-17 10:34:30 +08007992 trace_create_file("README", 0444, d_tracer,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007993 NULL, &tracing_readme_fops);
Ingo Molnar7bd2f242008-05-12 21:20:45 +02007994
Avadh Patel69abe6a2009-04-10 16:04:48 -04007995 trace_create_file("saved_cmdlines", 0444, d_tracer,
7996 NULL, &tracing_saved_cmdlines_fops);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007997
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09007998 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
7999 NULL, &tracing_saved_cmdlines_size_fops);
8000
Michael Sartain99c621d2017-07-05 22:07:15 -06008001 trace_create_file("saved_tgids", 0444, d_tracer,
8002 NULL, &tracing_saved_tgids_fops);
8003
Jeremy Linton5f60b352017-05-31 16:56:47 -05008004 trace_eval_init();
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04008005
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008006 trace_create_eval_file(d_tracer);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008007
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008008#ifdef CONFIG_MODULES
8009 register_module_notifier(&trace_module_nb);
8010#endif
8011
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008012#ifdef CONFIG_DYNAMIC_FTRACE
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008013 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8014 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008015#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008016
Steven Rostedt277ba042012-08-03 16:10:49 -04008017 create_trace_instances(d_tracer);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09008018
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008019 update_tracer_options(&global_trace);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05008020
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01008021 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008022}
8023
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008024static int trace_panic_handler(struct notifier_block *this,
8025 unsigned long event, void *unused)
8026{
Steven Rostedt944ac422008-10-23 19:26:08 -04008027 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02008028 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008029 return NOTIFY_OK;
8030}
8031
8032static struct notifier_block trace_panic_notifier = {
8033 .notifier_call = trace_panic_handler,
8034 .next = NULL,
8035 .priority = 150 /* priority: INT_MAX >= x >= 0 */
8036};
8037
8038static int trace_die_handler(struct notifier_block *self,
8039 unsigned long val,
8040 void *data)
8041{
8042 switch (val) {
8043 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04008044 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02008045 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008046 break;
8047 default:
8048 break;
8049 }
8050 return NOTIFY_OK;
8051}
8052
8053static struct notifier_block trace_die_notifier = {
8054 .notifier_call = trace_die_handler,
8055 .priority = 200
8056};
8057
8058/*
8059 * printk is set to max of 1024, we really don't need it that big.
8060 * Nothing should be printing 1000 characters anyway.
8061 */
8062#define TRACE_MAX_PRINT 1000
8063
8064/*
8065 * Define here KERN_TRACE so that we have one place to modify
8066 * it if we decide to change what log level the ftrace dump
8067 * should be at.
8068 */
Steven Rostedt428aee12009-01-14 12:24:42 -05008069#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008070
Jason Wessel955b61e2010-08-05 09:22:23 -05008071void
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008072trace_printk_seq(struct trace_seq *s)
8073{
8074 /* Probably should print a warning here. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04008075 if (s->seq.len >= TRACE_MAX_PRINT)
8076 s->seq.len = TRACE_MAX_PRINT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008077
Steven Rostedt (Red Hat)820b75f2014-11-19 10:56:41 -05008078 /*
8079 * More paranoid code. Although the buffer size is set to
8080 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8081 * an extra layer of protection.
8082 */
8083 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8084 s->seq.len = s->seq.size - 1;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008085
8086 /* should be zero ended, but we are paranoid. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04008087 s->buffer[s->seq.len] = 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008088
8089 printk(KERN_TRACE "%s", s->buffer);
8090
Steven Rostedtf9520752009-03-02 14:04:40 -05008091 trace_seq_init(s);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008092}
8093
Jason Wessel955b61e2010-08-05 09:22:23 -05008094void trace_init_global_iter(struct trace_iterator *iter)
8095{
8096 iter->tr = &global_trace;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008097 iter->trace = iter->tr->current_trace;
Steven Rostedtae3b5092013-01-23 15:22:59 -05008098 iter->cpu_file = RING_BUFFER_ALL_CPUS;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008099 iter->trace_buffer = &global_trace.trace_buffer;
Cody P Schaferb2f974d2013-10-23 11:49:57 -07008100
8101 if (iter->trace && iter->trace->open)
8102 iter->trace->open(iter);
8103
8104 /* Annotate start of buffers if we had overruns */
8105 if (ring_buffer_overruns(iter->trace_buffer->buffer))
8106 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8107
8108 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8109 if (trace_clocks[iter->tr->clock_id].in_ns)
8110 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
Jason Wessel955b61e2010-08-05 09:22:23 -05008111}
8112
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04008113void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008114{
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008115 /* use static because iter can be a bit big for the stack */
8116 static struct trace_iterator iter;
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04008117 static atomic_t dump_running;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008118 struct trace_array *tr = &global_trace;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01008119 unsigned int old_userobj;
Steven Rostedtd7690412008-10-01 00:29:53 -04008120 unsigned long flags;
8121 int cnt = 0, cpu;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008122
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04008123 /* Only allow one dump user at a time. */
8124 if (atomic_inc_return(&dump_running) != 1) {
8125 atomic_dec(&dump_running);
8126 return;
Steven Rostedte0a413f2011-09-29 21:26:16 -04008127 }
8128
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04008129 /*
8130 * Always turn off tracing when we dump.
8131 * We don't need to show trace output of what happens
8132 * between multiple crashes.
8133 *
8134 * If the user does a sysrq-z, then they can re-enable
8135 * tracing with echo 1 > tracing_on.
8136 */
8137 tracing_off();
8138
8139 local_irq_save(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008140
Jovi Zhang38dbe0b2013-01-25 18:03:07 +08008141 /* Simulate the iterator */
Jason Wessel955b61e2010-08-05 09:22:23 -05008142 trace_init_global_iter(&iter);
8143
Steven Rostedtd7690412008-10-01 00:29:53 -04008144 for_each_tracing_cpu(cpu) {
Umesh Tiwari5e2d5ef2015-06-22 16:55:06 +05308145 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -04008146 }
8147
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008148 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01008149
Török Edwinb54d3de2008-11-22 13:28:48 +02008150 /* don't look at user memory in panic mode */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008151 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
Török Edwinb54d3de2008-11-22 13:28:48 +02008152
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02008153 switch (oops_dump_mode) {
8154 case DUMP_ALL:
Steven Rostedtae3b5092013-01-23 15:22:59 -05008155 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02008156 break;
8157 case DUMP_ORIG:
8158 iter.cpu_file = raw_smp_processor_id();
8159 break;
8160 case DUMP_NONE:
8161 goto out_enable;
8162 default:
8163 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
Steven Rostedtae3b5092013-01-23 15:22:59 -05008164 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02008165 }
8166
8167 printk(KERN_TRACE "Dumping ftrace buffer:\n");
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008168
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04008169 /* Did function tracer already get disabled? */
8170 if (ftrace_is_dead()) {
8171 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8172 printk("# MAY BE MISSING FUNCTION EVENTS\n");
8173 }
8174
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008175 /*
8176 * We need to stop all tracing on all CPUS to read the
8177 * the next buffer. This is a bit expensive, but is
8178 * not done often. We fill all what we can read,
8179 * and then release the locks again.
8180 */
8181
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008182 while (!trace_empty(&iter)) {
8183
8184 if (!cnt)
8185 printk(KERN_TRACE "---------------------------------\n");
8186
8187 cnt++;
8188
8189 /* reset all but tr, trace, and overruns */
8190 memset(&iter.seq, 0,
8191 sizeof(struct trace_iterator) -
8192 offsetof(struct trace_iterator, seq));
8193 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8194 iter.pos = -1;
8195
Jason Wessel955b61e2010-08-05 09:22:23 -05008196 if (trace_find_next_entry_inc(&iter) != NULL) {
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08008197 int ret;
8198
8199 ret = print_trace_line(&iter);
8200 if (ret != TRACE_TYPE_NO_CONSUME)
8201 trace_consume(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008202 }
Steven Rostedtb892e5c2012-03-01 22:06:48 -05008203 touch_nmi_watchdog();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008204
8205 trace_printk_seq(&iter.seq);
8206 }
8207
8208 if (!cnt)
8209 printk(KERN_TRACE " (ftrace buffer empty)\n");
8210 else
8211 printk(KERN_TRACE "---------------------------------\n");
8212
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02008213 out_enable:
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008214 tr->trace_flags |= old_userobj;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01008215
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04008216 for_each_tracing_cpu(cpu) {
8217 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01008218 }
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04008219 atomic_dec(&dump_running);
Steven Rostedtcd891ae2009-04-28 11:39:34 -04008220 local_irq_restore(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008221}
Paul E. McKenneya8eecf22011-10-02 11:01:15 -07008222EXPORT_SYMBOL_GPL(ftrace_dump);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01008223
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008224__init static int tracer_alloc_buffers(void)
8225{
Steven Rostedt73c51622009-03-11 13:42:01 -04008226 int ring_buf_size;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10308227 int ret = -ENOMEM;
8228
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04008229 /*
8230 * Make sure we don't accidently add more trace options
8231 * than we have bits for.
8232 */
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008233 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04008234
Rusty Russell9e01c1b2009-01-01 10:12:22 +10308235 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
8236 goto out;
8237
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008238 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
Rusty Russell9e01c1b2009-01-01 10:12:22 +10308239 goto out_free_buffer_mask;
8240
Steven Rostedt07d777f2011-09-22 14:01:55 -04008241 /* Only allocate trace_printk buffers if a trace_printk exists */
8242 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
Steven Rostedt81698832012-10-11 10:15:05 -04008243 /* Must be called before global_trace.buffer is allocated */
Steven Rostedt07d777f2011-09-22 14:01:55 -04008244 trace_printk_init_buffers();
8245
Steven Rostedt73c51622009-03-11 13:42:01 -04008246 /* To save memory, keep the ring buffer size to its minimum */
8247 if (ring_buffer_expanded)
8248 ring_buf_size = trace_buf_size;
8249 else
8250 ring_buf_size = 1;
8251
Rusty Russell9e01c1b2009-01-01 10:12:22 +10308252 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008253 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008254
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008255 raw_spin_lock_init(&global_trace.start_lock);
8256
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01008257 /*
8258 * The prepare callbacks allocates some memory for the ring buffer. We
8259 * don't free the buffer if the if the CPU goes down. If we were to free
8260 * the buffer, then the user would lose any trace that was in the
8261 * buffer. The memory will be removed once the "instance" is removed.
8262 */
8263 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
8264 "trace/RB:preapre", trace_rb_cpu_prepare,
8265 NULL);
8266 if (ret < 0)
8267 goto out_free_cpumask;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04008268 /* Used for event triggers */
8269 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
8270 if (!temp_buffer)
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01008271 goto out_rm_hp_state;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04008272
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09008273 if (trace_create_savedcmd() < 0)
8274 goto out_free_temp_buffer;
8275
Steven Rostedtab464282008-05-12 21:21:00 +02008276 /* TODO: make the number of buffers hot pluggable with CPUS */
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008277 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04008278 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
8279 WARN_ON(1);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09008280 goto out_free_savedcmd;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04008281 }
Steven Rostedta7603ff2012-08-06 16:24:11 -04008282
Steven Rostedt499e5472012-02-22 15:50:28 -05008283 if (global_trace.buffer_disabled)
8284 tracing_off();
Steven Rostedt3928a8a2008-09-29 23:02:41 -04008285
Steven Rostedte1e232c2014-02-10 23:38:46 -05008286 if (trace_boot_clock) {
8287 ret = tracing_set_clock(&global_trace, trace_boot_clock);
8288 if (ret < 0)
Joe Perchesa395d6a2016-03-22 14:28:09 -07008289 pr_warn("Trace clock %s not defined, going back to default\n",
8290 trace_boot_clock);
Steven Rostedte1e232c2014-02-10 23:38:46 -05008291 }
8292
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04008293 /*
8294 * register_tracer() might reference current_trace, so it
8295 * needs to be set before we register anything. This is
8296 * just a bootstrap of current_trace anyway.
8297 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008298 global_trace.current_trace = &nop_trace;
8299
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05008300 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8301
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05008302 ftrace_init_global_array_ops(&global_trace);
8303
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008304 init_trace_flags_index(&global_trace);
8305
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04008306 register_tracer(&nop_trace);
8307
Steven Rostedt (VMware)dbeafd02017-03-03 13:48:42 -05008308 /* Function tracing may start here (via kernel command line) */
8309 init_function_trace();
8310
Steven Rostedt60a11772008-05-12 21:20:44 +02008311 /* All seems OK, enable tracing */
8312 tracing_disabled = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04008313
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008314 atomic_notifier_chain_register(&panic_notifier_list,
8315 &trace_panic_notifier);
8316
8317 register_die_notifier(&trace_die_notifier);
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01008318
Steven Rostedtae63b31e2012-05-03 23:09:03 -04008319 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
8320
8321 INIT_LIST_HEAD(&global_trace.systems);
8322 INIT_LIST_HEAD(&global_trace.events);
8323 list_add(&global_trace.list, &ftrace_trace_arrays);
8324
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08008325 apply_trace_boot_options();
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04008326
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008327 register_snapshot_cmd();
8328
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01008329 return 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008330
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09008331out_free_savedcmd:
8332 free_saved_cmdlines_buffer(savedcmd);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04008333out_free_temp_buffer:
8334 ring_buffer_free(temp_buffer);
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01008335out_rm_hp_state:
8336 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10308337out_free_cpumask:
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008338 free_cpumask_var(global_trace.tracing_cpumask);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10308339out_free_buffer_mask:
8340 free_cpumask_var(tracing_buffer_mask);
8341out:
8342 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008343}
Steven Rostedtb2821ae2009-02-02 21:38:32 -05008344
Steven Rostedt (VMware)e725c732017-03-03 13:37:33 -05008345void __init early_trace_init(void)
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05008346{
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05008347 if (tracepoint_printk) {
8348 tracepoint_print_iter =
8349 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
8350 if (WARN_ON(!tracepoint_print_iter))
8351 tracepoint_printk = 0;
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05008352 else
8353 static_key_enable(&tracepoint_printk_key.key);
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05008354 }
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05008355 tracer_alloc_buffers();
Steven Rostedt (VMware)e725c732017-03-03 13:37:33 -05008356}
8357
8358void __init trace_init(void)
8359{
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04008360 trace_event_init();
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05008361}
8362
Steven Rostedtb2821ae2009-02-02 21:38:32 -05008363__init static int clear_boot_tracer(void)
8364{
8365 /*
8366 * The default tracer at boot buffer is an init section.
8367 * This function is called in lateinit. If we did not
8368 * find the boot tracer, then clear it out, to prevent
8369 * later registration from accessing the buffer that is
8370 * about to be freed.
8371 */
8372 if (!default_bootup_tracer)
8373 return 0;
8374
8375 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
8376 default_bootup_tracer);
8377 default_bootup_tracer = NULL;
8378
8379 return 0;
8380}
8381
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008382fs_initcall(tracer_init_tracefs);
Steven Rostedtb2821ae2009-02-02 21:38:32 -05008383late_initcall(clear_boot_tracer);