/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 and will be set to zero if the tracer
 * initialization succeeds. That is the only place that sets it
 * back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 to dump the buffers of all CPUs.
 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * from "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

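/*
 * Illustrative sketch (not compiled; the locking and NULL checks a real
 * walker needs are omitted): the layout described above is a head item,
 * then head.length map items, then a tail item whose tail.next points at
 * the next saved array:
 */
#if 0
static void example_walk_eval_maps(void)
{
	union trace_eval_map_item *ptr = trace_eval_maps;
	unsigned long i;

	while (ptr) {
		unsigned long len = ptr->head.length;

		for (i = 0; i < len; i++) {
			struct trace_eval_map *map = &ptr[i + 1].map;
			/* ... use map->system, map->eval_string ... */
		}
		/* the tail item follows the last map item */
		ptr = ptr[len + 1].tail.next;
	}
}
#endif
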
static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

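/*
 * Example: booting with "ftrace_dump_on_oops" selects DUMP_ALL, while
 * "ftrace_dump_on_oops=orig_cpu" selects DUMP_ORIG; any other "=<val>"
 * suffix is rejected by the parser above.
 */
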
static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

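/*
 * Example: the "+ 500" above rounds to the nearest microsecond, so
 * ns2usecs(1500) == 2 while ns2usecs(1499) == 1.
 */
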
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

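/*
 * Illustrative sketch (hypothetical caller, not compiled): pin a
 * trace_array with trace_array_get() before using it and drop the
 * reference with trace_array_put() when done:
 */
#if 0
static int example_use_trace_array(struct trace_array *tr)
{
	if (trace_array_get(tr) < 0)
		return -ENODEV;	/* tr is not on ftrace_trace_arrays */

	/* ... safely use tr while holding the reference ... */

	trace_array_put(tr);
	return 0;
}
#endif
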
int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}

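/*
 * Illustrative sketch (not compiled): the three helpers above are
 * designed to back a seq_file interface. A file exposing a
 * trace_pid_list could wire them up as below ("example_pid_list" and
 * the ops names are hypothetical):
 */
#if 0
static struct trace_pid_list *example_pid_list;

static void *example_pids_start(struct seq_file *m, loff_t *pos)
{
	return trace_pid_start(example_pid_list, pos);
}

static void *example_pids_next(struct seq_file *m, void *v, loff_t *pos)
{
	return trace_pid_next(example_pid_list, v, pos);
}

static void example_pids_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations example_pids_seq_ops = {
	.start	= example_pids_start,
	.next	= example_pids_next,
	.stop	= example_pids_stop,
	.show	= trace_pid_show,
};
#endif
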
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		trace_parser_put(&parser);
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}

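/*
 * Example: a write of "123 456" through a file backed by this helper
 * (such as set_event_pid) builds a fresh list with the previously set
 * pids copied in plus 123 and 456; on any parse error the old list is
 * left untouched.
 */
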
static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway, this is
 * configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multi process access to different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

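/*
 * Illustrative sketch (not compiled): a per-cpu reader wraps its buffer
 * accesses in the primitives above; passing RING_BUFFER_ALL_CPUS
 * instead serializes against every per-cpu reader at once:
 */
#if 0
	trace_access_lock(cpu);
	/* ... peek at or consume events from the @cpu ring buffer ... */
	trace_access_unlock(cpu);
#endif
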
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, flags, pc);
	ent->type = type;
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct ring_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

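/*
 * Illustrative sketch (hypothetical helper, not compiled): the
 * reserve/fill/commit pattern used throughout this file, shown here
 * for a print entry. __trace_puts() below is the real in-tree user:
 */
#if 0
static void example_write_print_entry(struct ring_buffer *buffer,
				      unsigned long ip, int pc)
{
	struct ring_buffer_event *event;
	struct print_entry *entry;
	unsigned long irq_flags;

	local_save_flags(irq_flags);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT,
					    sizeof(*entry) + 1,
					    irq_flags, pc);
	if (!event)
		return;		/* buffer full or recording disabled */

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->buf[0] = '\0';

	__buffer_unlock_commit(buffer, event);
}
#endif
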
void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip: The address of the caller
 * @str: The constant string to write
 * @size: The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

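/*
 * Example: callers normally go through the trace_puts() macro in the
 * tracing headers rather than calling this directly; for a string
 * literal it resolves to __trace_bputs()/__trace_puts():
 *
 *	trace_puts("reached the fast path\n");
 */
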
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip: The address of the caller
 * @str: The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance(struct trace_array *tr)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500946
947static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
948 struct trace_buffer *size_buf, int cpu_id);
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400949static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
950
951static int alloc_snapshot(struct trace_array *tr)
952{
953 int ret;
954
955 if (!tr->allocated_snapshot) {
956
957 /* allocate spare buffer */
958 ret = resize_buffer_duplicate_size(&tr->max_buffer,
959 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
960 if (ret < 0)
961 return ret;
962
963 tr->allocated_snapshot = true;
964 }
965
966 return 0;
967}
968
Fabian Frederickad1438a2014-04-17 21:44:42 +0200969static void free_snapshot(struct trace_array *tr)
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400970{
971 /*
972 * We don't free the ring buffer. instead, resize it because
973 * The max_tr ring buffer has some state (e.g. ring->clock) and
974 * we want preserve it.
975 */
976 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
977 set_buffer_entries(&tr->max_buffer, 1);
978 tracing_reset_online_cpus(&tr->max_buffer);
979 tr->allocated_snapshot = false;
980}
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500981
/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
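/*
 * Illustrative sketch (not compiled): a typical pattern, per the
 * comments above, is to allocate the snapshot buffer once from a
 * context that may sleep, then trigger cheap snapshots at the
 * condition of interest:
 */
#if 0
	/* at init time (may sleep) */
	if (tracing_alloc_snapshot() < 0)
		return;

	/* later, at the interesting event (atomic context is fine) */
	tracing_snapshot();
#endif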
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

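/*
 * Example: memparse() understands K/M/G suffixes, so booting with
 * "trace_buf_size=4M" requests a 4 MiB buffer.
 */
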
static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};

bool trace_clock_in_ns(struct trace_array *tr)
{
	if (trace_clocks[tr->clock_id].in_ns)
		return true;

	return false;
}

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		parser->idx = 0;

		/* only spaces were written */
		if (isspace(ch) || !ch) {
			*ppos += read;
			ret = read;
			goto out;
		}
	}

	/* read the non-space input */
	while (cnt && !isspace(ch) && ch) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch) || !ch) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
		/* Make sure the parsed string always terminates with '\0'. */
		parser->buffer[parser->idx] = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

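/*
 * Example: given a single write of "sched_switch sched_wakeup", two
 * consecutive calls return with parser->buffer holding "sched_switch"
 * and then "sched_wakeup". If a token is cut off by the end of the
 * write, parser->cont is set so the next call keeps appending to it.
 */
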
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

1385/**
1386 * update_max_tr_single - only copy one trace over, and reset the rest
1387 * @tr: tracer
1388 * @tsk: task with the latency
1389 * @cpu: the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001390 *
1391 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001392 */
Ingo Molnare309b412008-05-12 21:20:51 +02001393void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001394update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1395{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001396 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001397
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001398 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001399 return;
1400
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001401 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001402 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001403 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001404 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001405 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001406 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001407
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001408 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001409
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001410 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001411
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001412 if (ret == -EBUSY) {
1413 /*
1414 * We failed to swap the buffer due to a commit taking
1415 * place on this CPU. We fail to record, but we reset
1416 * the max trace buffer (no one writes directly to it)
1417 * and flag that it failed.
1418 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001419 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001420 "Failed to swap buffers due to commit in progress\n");
1421 }
1422
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001423 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001424
1425 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001426 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001427}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001428#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001429
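/*
 * Sleep until data is available in the trace pipe for iter->cpu_file.
 * When @full is set, wait until a whole page of data is ready rather
 * than returning on the first event.
 */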
Rabin Vincente30f53a2014-11-10 19:46:34 +01001430static int wait_on_pipe(struct trace_iterator *iter, bool full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001431{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001432 /* Iterators are static, they should be filled or empty */
1433 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001434 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001435
Rabin Vincente30f53a2014-11-10 19:46:34 +01001436 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1437 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001438}
1439
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001440#ifdef CONFIG_FTRACE_STARTUP_TEST
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001441static bool selftests_can_run;
1442
1443struct trace_selftests {
1444 struct list_head list;
1445 struct tracer *type;
1446};
1447
1448static LIST_HEAD(postponed_selftests);
1449
1450static int save_selftest(struct tracer *type)
1451{
1452 struct trace_selftests *selftest;
1453
1454 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1455 if (!selftest)
1456 return -ENOMEM;
1457
1458 selftest->type = type;
1459 list_add(&selftest->list, &postponed_selftests);
1460 return 0;
1461}
1462
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001463static int run_tracer_selftest(struct tracer *type)
1464{
1465 struct trace_array *tr = &global_trace;
1466 struct tracer *saved_tracer = tr->current_trace;
1467 int ret;
1468
1469 if (!type->selftest || tracing_selftest_disabled)
1470 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001471
1472 /*
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001473 * If a tracer registers early in boot up (before scheduling is
1474 * initialized and such), then do not run its selftests yet.
1475 * Instead, run them a little later in the boot process.
1476 */
1477 if (!selftests_can_run)
1478 return save_selftest(type);
1479
1480 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001481 * Run a selftest on this tracer.
1482 * Here we reset the trace buffer, and set the current
1483 * tracer to be this tracer. The tracer can then run some
1484 * internal tracing to verify that everything is in order.
1485 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001486 */
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001487 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001488
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001489 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001490
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001491#ifdef CONFIG_TRACER_MAX_TRACE
1492 if (type->use_max_tr) {
1493 /* If we expanded the buffers, make sure the max is expanded too */
1494 if (ring_buffer_expanded)
1495 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1496 RING_BUFFER_ALL_CPUS);
1497 tr->allocated_snapshot = true;
1498 }
1499#endif
1500
1501 /* the test is responsible for initializing and enabling */
1502 pr_info("Testing tracer %s: ", type->name);
1503 ret = type->selftest(type, tr);
1504 /* the test is responsible for resetting too */
1505 tr->current_trace = saved_tracer;
1506 if (ret) {
1507 printk(KERN_CONT "FAILED!\n");
1508 /* Add the warning after printing 'FAILED' */
1509 WARN_ON(1);
1510 return -1;
1511 }
1512 /* Only reset on passing, to avoid touching corrupted buffers */
1513 tracing_reset_online_cpus(&tr->trace_buffer);
1514
1515#ifdef CONFIG_TRACER_MAX_TRACE
1516 if (type->use_max_tr) {
1517 tr->allocated_snapshot = false;
1518
1519 /* Shrink the max buffer again */
1520 if (ring_buffer_expanded)
1521 ring_buffer_resize(tr->max_buffer.buffer, 1,
1522 RING_BUFFER_ALL_CPUS);
1523 }
1524#endif
1525
1526 printk(KERN_CONT "PASSED\n");
1527 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001528}
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001529
1530static __init int init_trace_selftests(void)
1531{
1532 struct trace_selftests *p, *n;
1533 struct tracer *t, **last;
1534 int ret;
1535
1536 selftests_can_run = true;
1537
1538 mutex_lock(&trace_types_lock);
1539
1540 if (list_empty(&postponed_selftests))
1541 goto out;
1542
1543 pr_info("Running postponed tracer tests:\n");
1544
1545 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1546 ret = run_tracer_selftest(p->type);
1547 /* If the test fails, then warn and remove from available_tracers */
1548 if (ret < 0) {
1549 WARN(1, "tracer: %s failed selftest, disabling\n",
1550 p->type->name);
1551 last = &trace_types;
1552 for (t = trace_types; t; t = t->next) {
1553 if (t == p->type) {
1554 *last = t->next;
1555 break;
1556 }
1557 last = &t->next;
1558 }
1559 }
1560 list_del(&p->list);
1561 kfree(p);
1562 }
1563
1564 out:
1565 mutex_unlock(&trace_types_lock);
1566
1567 return 0;
1568}
Steven Rostedtb9ef0322017-05-17 11:14:35 -04001569core_initcall(init_trace_selftests);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001570#else
1571static inline int run_tracer_selftest(struct tracer *type)
1572{
1573 return 0;
1574}
1575#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001576
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001577static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1578
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001579static void __init apply_trace_boot_options(void);
1580
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001581/**
1582 * register_tracer - register a tracer with the ftrace system.
1583 * @type: the plugin for the tracer
1584 *
1585 * Register a new plugin tracer.
1586 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001587int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001588{
1589 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001590 int ret = 0;
1591
1592 if (!type->name) {
1593 pr_info("Tracer must have a name\n");
1594 return -1;
1595 }
1596
Dan Carpenter24a461d2010-07-10 12:06:44 +02001597 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08001598 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1599 return -1;
1600 }
1601
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001602 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01001603
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001604 tracing_selftest_running = true;
1605
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001606 for (t = trace_types; t; t = t->next) {
1607 if (strcmp(type->name, t->name) == 0) {
1608 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08001609 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001610 type->name);
1611 ret = -1;
1612 goto out;
1613 }
1614 }
1615
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001616 if (!type->set_flag)
1617 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08001618 if (!type->flags) {
1619		/* allocate a dummy tracer_flags */
1620 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08001621 if (!type->flags) {
1622 ret = -ENOMEM;
1623 goto out;
1624 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08001625 type->flags->val = 0;
1626 type->flags->opts = dummy_tracer_opt;
1627 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001628 if (!type->flags->opts)
1629 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01001630
Chunyu Hud39cdd22016-03-08 21:37:01 +08001631 /* store the tracer for __set_tracer_option */
1632 type->flags->trace = type;
1633
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001634 ret = run_tracer_selftest(type);
1635 if (ret < 0)
1636 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02001637
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001638 type->next = trace_types;
1639 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001640 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02001641
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001642 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001643 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001644 mutex_unlock(&trace_types_lock);
1645
Steven Rostedtdac74942009-02-05 01:13:38 -05001646 if (ret || !default_bootup_tracer)
1647 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05001648
Li Zefanee6c2c12009-09-18 14:06:47 +08001649 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05001650 goto out_unlock;
1651
1652 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1653 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05001654 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05001655 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001656
1657 apply_trace_boot_options();
1658
Steven Rostedtdac74942009-02-05 01:13:38 -05001659	/* disable other selftests, since running this tracer will break them */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05001660 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05001661#ifdef CONFIG_FTRACE_STARTUP_TEST
1662 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1663 type->name);
1664#endif
1665
1666 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001667 return ret;
1668}
1669
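/*
 * Illustrative sketch only (not part of this file): the minimal shape
 * of a tracer as it would be handed to register_tracer() from
 * boot-time init code. The "mytrace" name and the empty callbacks are
 * made up.
 */
#if 0
static int mytrace_init(struct trace_array *tr)
{
	return 0;		/* start tracing here */
}

static void mytrace_reset(struct trace_array *tr)
{
	/* stop tracing and free anything mytrace_init() set up */
}

static struct tracer mytrace __read_mostly = {
	.name	= "mytrace",
	.init	= mytrace_init,
	.reset	= mytrace_reset,
};

static __init int mytrace_register(void)
{
	return register_tracer(&mytrace);
}
core_initcall(mytrace_register);
#endif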
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001670void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001671{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001672 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001673
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001674 if (!buffer)
1675 return;
1676
Steven Rostedtf6339032009-09-04 12:35:16 -04001677 ring_buffer_record_disable(buffer);
1678
1679 /* Make sure all commits have finished */
1680 synchronize_sched();
Steven Rostedt68179682012-05-08 20:57:53 -04001681 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001682
1683 ring_buffer_record_enable(buffer);
1684}
1685
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001686void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001687{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001688 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001689 int cpu;
1690
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001691 if (!buffer)
1692 return;
1693
Steven Rostedt621968c2009-09-04 12:02:35 -04001694 ring_buffer_record_disable(buffer);
1695
1696 /* Make sure all commits have finished */
1697 synchronize_sched();
1698
Alexander Z Lam94571582013-08-02 18:36:16 -07001699 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001700
1701 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001702 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001703
1704 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001705}
1706
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001707/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001708void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001709{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001710 struct trace_array *tr;
1711
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001712 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (VMware)065e63f2017-08-31 17:03:47 -04001713 if (!tr->clear_trace)
1714 continue;
1715 tr->clear_trace = false;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001716 tracing_reset_online_cpus(&tr->trace_buffer);
1717#ifdef CONFIG_TRACER_MAX_TRACE
1718 tracing_reset_online_cpus(&tr->max_buffer);
1719#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001720 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001721}
1722
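/*
 * Map of pid -> tgid, used when the "record-tgid" trace option is set;
 * allocated the first time that option is enabled.
 */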
Joel Fernandesd914ba32017-06-26 19:01:55 -07001723static int *tgid_map;
1724
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001725#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001726#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001727static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001728struct saved_cmdlines_buffer {
1729 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1730 unsigned *map_cmdline_to_pid;
1731 unsigned cmdline_num;
1732 int cmdline_idx;
1733 char *saved_cmdlines;
1734};
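/*
 * map_pid_to_cmdline[] and map_cmdline_to_pid[] form a two-way mapping
 * so that a saved_cmdlines slot being recycled for a new pid can first
 * be unhooked from its old pid (see trace_save_cmdline() below).
 */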
1735static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001736
Steven Rostedt25b0b442008-05-12 21:21:00 +02001737/* temporary disable recording */
Joel Fernandesd914ba32017-06-26 19:01:55 -07001738static atomic_t trace_record_taskinfo_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001739
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001740static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001741{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001742 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1743}
1744
1745static inline void set_cmdline(int idx, const char *cmdline)
1746{
1747 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1748}
1749
1750static int allocate_cmdlines_buffer(unsigned int val,
1751 struct saved_cmdlines_buffer *s)
1752{
1753 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1754 GFP_KERNEL);
1755 if (!s->map_cmdline_to_pid)
1756 return -ENOMEM;
1757
1758 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1759 if (!s->saved_cmdlines) {
1760 kfree(s->map_cmdline_to_pid);
1761 return -ENOMEM;
1762 }
1763
1764 s->cmdline_idx = 0;
1765 s->cmdline_num = val;
1766 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1767 sizeof(s->map_pid_to_cmdline));
1768 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1769 val * sizeof(*s->map_cmdline_to_pid));
1770
1771 return 0;
1772}
1773
1774static int trace_create_savedcmd(void)
1775{
1776 int ret;
1777
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001778 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001779 if (!savedcmd)
1780 return -ENOMEM;
1781
1782 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1783 if (ret < 0) {
1784 kfree(savedcmd);
1785 savedcmd = NULL;
1786 return -ENOMEM;
1787 }
1788
1789 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001790}
1791
Carsten Emdeb5130b12009-09-13 01:43:07 +02001792int is_tracing_stopped(void)
1793{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001794 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001795}
1796
Steven Rostedt0f048702008-11-05 16:05:44 -05001797/**
1798 * tracing_start - quick start of the tracer
1799 *
1800 * If tracing is enabled but was stopped by tracing_stop,
1801 * this will start the tracer back up.
1802 */
1803void tracing_start(void)
1804{
1805 struct ring_buffer *buffer;
1806 unsigned long flags;
1807
1808 if (tracing_disabled)
1809 return;
1810
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001811 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1812 if (--global_trace.stop_count) {
1813 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001814 /* Someone screwed up their debugging */
1815 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001816 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001817 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001818 goto out;
1819 }
1820
Steven Rostedta2f80712010-03-12 19:56:00 -05001821 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001822 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001823
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001824 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001825 if (buffer)
1826 ring_buffer_record_enable(buffer);
1827
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001828#ifdef CONFIG_TRACER_MAX_TRACE
1829 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001830 if (buffer)
1831 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001832#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001833
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001834 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001835
Steven Rostedt0f048702008-11-05 16:05:44 -05001836 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001837 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1838}
1839
1840static void tracing_start_tr(struct trace_array *tr)
1841{
1842 struct ring_buffer *buffer;
1843 unsigned long flags;
1844
1845 if (tracing_disabled)
1846 return;
1847
1848 /* If global, we need to also start the max tracer */
1849 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1850 return tracing_start();
1851
1852 raw_spin_lock_irqsave(&tr->start_lock, flags);
1853
1854 if (--tr->stop_count) {
1855 if (tr->stop_count < 0) {
1856 /* Someone screwed up their debugging */
1857 WARN_ON_ONCE(1);
1858 tr->stop_count = 0;
1859 }
1860 goto out;
1861 }
1862
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001863 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001864 if (buffer)
1865 ring_buffer_record_enable(buffer);
1866
1867 out:
1868 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001869}
1870
1871/**
1872 * tracing_stop - quick stop of the tracer
1873 *
1874 * Light weight way to stop tracing. Use in conjunction with
1875 * tracing_start.
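 *
 * stop_count is a reference count, so nested tracing_stop()/
 * tracing_start() pairs behave as expected.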
1876 */
1877void tracing_stop(void)
1878{
1879 struct ring_buffer *buffer;
1880 unsigned long flags;
1881
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001882 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1883 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001884 goto out;
1885
Steven Rostedta2f80712010-03-12 19:56:00 -05001886 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001887 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001888
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001889 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001890 if (buffer)
1891 ring_buffer_record_disable(buffer);
1892
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001893#ifdef CONFIG_TRACER_MAX_TRACE
1894 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001895 if (buffer)
1896 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001897#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001898
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001899 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001900
Steven Rostedt0f048702008-11-05 16:05:44 -05001901 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001902 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1903}
1904
1905static void tracing_stop_tr(struct trace_array *tr)
1906{
1907 struct ring_buffer *buffer;
1908 unsigned long flags;
1909
1910 /* If global, we need to also stop the max tracer */
1911 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1912 return tracing_stop();
1913
1914 raw_spin_lock_irqsave(&tr->start_lock, flags);
1915 if (tr->stop_count++)
1916 goto out;
1917
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001918 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001919 if (buffer)
1920 ring_buffer_record_disable(buffer);
1921
1922 out:
1923 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001924}
1925
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001926static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001927{
Carsten Emdea635cf02009-03-18 09:00:41 +01001928 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001929
Joel Fernandeseaf260a2017-07-06 16:00:21 -07001930 /* treat recording of idle task as a success */
1931 if (!tsk->pid)
1932 return 1;
1933
1934 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001935 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001936
1937 /*
1938 * It's not the end of the world if we don't get
1939 * the lock, but we also don't want to spin
1940 * nor do we want to disable interrupts,
1941 * so if we miss here, then better luck next time.
1942 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001943 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001944 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001945
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001946 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001947 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001948 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001949
Carsten Emdea635cf02009-03-18 09:00:41 +01001950 /*
1951 * Check whether the cmdline buffer at idx has a pid
1952 * mapped. We are going to overwrite that entry so we
1953 * need to clear the map_pid_to_cmdline. Otherwise we
1954 * would read the new comm for the old pid.
1955 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001956 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001957 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001958 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001959
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001960 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1961 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001962
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001963 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001964 }
1965
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001966 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001967
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001968 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001969
1970 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001971}
1972
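/*
 * Caller must hold trace_cmdline_lock; trace_find_cmdline() below is
 * the locking wrapper.
 */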
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001973static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001974{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001975 unsigned map;
1976
Steven Rostedt4ca530852009-03-16 19:20:15 -04001977 if (!pid) {
1978 strcpy(comm, "<idle>");
1979 return;
1980 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001981
Steven Rostedt74bf4072010-01-25 15:11:53 -05001982 if (WARN_ON_ONCE(pid < 0)) {
1983 strcpy(comm, "<XXX>");
1984 return;
1985 }
1986
Steven Rostedt4ca530852009-03-16 19:20:15 -04001987 if (pid > PID_MAX_DEFAULT) {
1988 strcpy(comm, "<...>");
1989 return;
1990 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001991
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001992 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001993 if (map != NO_CMDLINE_MAP)
Amey Telawanee09e2862017-05-03 15:41:14 +05301994 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
Thomas Gleixner50d88752009-03-18 08:58:44 +01001995 else
1996 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001997}
1998
1999void trace_find_cmdline(int pid, char comm[])
2000{
2001 preempt_disable();
2002 arch_spin_lock(&trace_cmdline_lock);
2003
2004 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002005
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002006 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02002007 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002008}
2009
Joel Fernandesd914ba32017-06-26 19:01:55 -07002010int trace_find_tgid(int pid)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002011{
Joel Fernandesd914ba32017-06-26 19:01:55 -07002012 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2013 return 0;
2014
2015 return tgid_map[pid];
2016}
2017
2018static int trace_save_tgid(struct task_struct *tsk)
2019{
Joel Fernandesbd45d342017-07-06 16:00:22 -07002020 /* treat recording of idle task as a success */
2021 if (!tsk->pid)
2022 return 1;
2023
2024 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
Joel Fernandesd914ba32017-06-26 19:01:55 -07002025 return 0;
2026
2027 tgid_map[tsk->pid] = tsk->tgid;
2028 return 1;
2029}
2030
2031static bool tracing_record_taskinfo_skip(int flags)
2032{
2033 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2034 return true;
2035 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2036 return true;
2037 if (!__this_cpu_read(trace_taskinfo_save))
2038 return true;
2039 return false;
2040}
2041
2042/**
2043 * tracing_record_taskinfo - record the task info of a task
2044 *
2045 * @task: task to record
2046 * @flags: TRACE_RECORD_CMDLINE for recording comm
2047 *         TRACE_RECORD_TGID for recording tgid
2048 */
2049void tracing_record_taskinfo(struct task_struct *task, int flags)
2050{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002051 bool done;
2052
Joel Fernandesd914ba32017-06-26 19:01:55 -07002053 if (tracing_record_taskinfo_skip(flags))
2054 return;
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002055
2056 /*
2057 * Record as much task information as possible. If some fail, continue
2058 * to try to record the others.
2059 */
2060 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2061 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2062
2063 /* If recording any information failed, retry again soon. */
2064 if (!done)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002065 return;
2066
Joel Fernandesd914ba32017-06-26 19:01:55 -07002067 __this_cpu_write(trace_taskinfo_save, false);
2068}
2069
2070/**
2071 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2072 *
2073 * @prev: previous task during sched_switch
2074 * @next: next task during sched_switch
2075 * @flags: TRACE_RECORD_CMDLINE for recording comm
2076 *         TRACE_RECORD_TGID for recording tgid
2077 */
2078void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2079 struct task_struct *next, int flags)
2080{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002081 bool done;
2082
Joel Fernandesd914ba32017-06-26 19:01:55 -07002083 if (tracing_record_taskinfo_skip(flags))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002084 return;
2085
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002086 /*
2087 * Record as much task information as possible. If some fail, continue
2088 * to try to record the others.
2089 */
2090 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2091 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2092 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2093 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
Joel Fernandesd914ba32017-06-26 19:01:55 -07002094
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002095 /* If recording any information failed, retry again soon. */
2096 if (!done)
Joel Fernandesd914ba32017-06-26 19:01:55 -07002097 return;
2098
2099 __this_cpu_write(trace_taskinfo_save, false);
2100}
2101
2102/* Helpers to record a specific task information */
2103void tracing_record_cmdline(struct task_struct *task)
2104{
2105 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2106}
2107
2108void tracing_record_tgid(struct task_struct *task)
2109{
2110 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002111}
2112
Steven Rostedt (VMware)af0009f2017-03-16 11:01:06 -04002113/*
2114 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2115 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2116 * simplifies those functions and keeps them in sync.
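 *
 * A print handler then typically ends like this (sketch; the handler
 * name is hypothetical):
 *
 *	static enum print_line_t
 *	trace_foo_print(struct trace_iterator *iter, int flags,
 *			struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "...");
 *		return trace_handle_return(&iter->seq);
 *	}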
2117 */
2118enum print_line_t trace_handle_return(struct trace_seq *s)
2119{
2120 return trace_seq_has_overflowed(s) ?
2121 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2122}
2123EXPORT_SYMBOL_GPL(trace_handle_return);
2124
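/*
 * Fill in the fields common to every trace entry: the owning pid, the
 * preempt count, and the irq/softirq/NMI/need-resched flag bits.
 */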
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03002125void
Steven Rostedt38697052008-10-01 13:14:09 -04002126tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2127 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002128{
2129 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002130
Steven Rostedt777e2082008-09-29 23:02:42 -04002131 entry->preempt_count = pc & 0xff;
2132 entry->pid = (tsk) ? tsk->pid : 0;
2133 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04002134#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04002135 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04002136#else
2137 TRACE_FLAG_IRQS_NOSUPPORT |
2138#endif
Peter Zijlstra7e6867b2016-03-18 16:28:04 +01002139 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002140 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
Pavankumar Kondetic59f29c2016-12-09 21:50:17 +05302141 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02002142 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2143 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002144}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02002145EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002146
Steven Rostedte77405a2009-09-02 14:17:06 -04002147struct ring_buffer_event *
2148trace_buffer_lock_reserve(struct ring_buffer *buffer,
2149 int type,
2150 unsigned long len,
2151 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002152{
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002153 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002154}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002155
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002156DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2157DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2158static int trace_buffered_event_ref;
2159
2160/**
2161 * trace_buffered_event_enable - enable buffering events
2162 *
2163 * When events are being filtered, it is quicker to use a temporary
2164 * buffer to write the event data into if there's a likely chance
2165 * that it will not be committed. The discard of the ring buffer
2166 * is not as fast as committing, and is much slower than copying
2167 * a commit.
2168 *
2169 * When an event is to be filtered, allocate per cpu buffers to
2170 * write the event data into, and if the event is filtered and discarded
2171 * it is simply dropped, otherwise, the entire data is to be committed
2172 * in one shot.
2173 */
2174void trace_buffered_event_enable(void)
2175{
2176 struct ring_buffer_event *event;
2177 struct page *page;
2178 int cpu;
2179
2180 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2181
2182 if (trace_buffered_event_ref++)
2183 return;
2184
2185 for_each_tracing_cpu(cpu) {
2186 page = alloc_pages_node(cpu_to_node(cpu),
2187 GFP_KERNEL | __GFP_NORETRY, 0);
2188 if (!page)
2189 goto failed;
2190
2191 event = page_address(page);
2192 memset(event, 0, sizeof(*event));
2193
2194 per_cpu(trace_buffered_event, cpu) = event;
2195
2196 preempt_disable();
2197 if (cpu == smp_processor_id() &&
2198 this_cpu_read(trace_buffered_event) !=
2199 per_cpu(trace_buffered_event, cpu))
2200 WARN_ON_ONCE(1);
2201 preempt_enable();
2202 }
2203
2204 return;
2205 failed:
2206 trace_buffered_event_disable();
2207}
2208
2209static void enable_trace_buffered_event(void *data)
2210{
2211 /* Probably not needed, but do it anyway */
2212 smp_rmb();
2213 this_cpu_dec(trace_buffered_event_cnt);
2214}
2215
2216static void disable_trace_buffered_event(void *data)
2217{
2218 this_cpu_inc(trace_buffered_event_cnt);
2219}
2220
2221/**
2222 * trace_buffered_event_disable - disable buffering events
2223 *
2224 * When a filter is removed, it is faster to not use the buffered
2225 * events, and to commit directly into the ring buffer. Free up
2226 * the temp buffers when there are no more users. This requires
2227 * special synchronization with current events.
2228 */
2229void trace_buffered_event_disable(void)
2230{
2231 int cpu;
2232
2233 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2234
2235 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2236 return;
2237
2238 if (--trace_buffered_event_ref)
2239 return;
2240
2241 preempt_disable();
2242 /* For each CPU, set the buffer as used. */
2243 smp_call_function_many(tracing_buffer_mask,
2244 disable_trace_buffered_event, NULL, 1);
2245 preempt_enable();
2246
2247 /* Wait for all current users to finish */
2248 synchronize_sched();
2249
2250 for_each_tracing_cpu(cpu) {
2251 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2252 per_cpu(trace_buffered_event, cpu) = NULL;
2253 }
2254 /*
2255 * Make sure trace_buffered_event is NULL before clearing
2256 * trace_buffered_event_cnt.
2257 */
2258 smp_wmb();
2259
2260 preempt_disable();
2261 /* Do the work on each cpu */
2262 smp_call_function_many(tracing_buffer_mask,
2263 enable_trace_buffered_event, NULL, 1);
2264 preempt_enable();
2265}
2266
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002267static struct ring_buffer *temp_buffer;
2268
Steven Rostedtef5580d2009-02-27 19:38:04 -05002269struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04002270trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002271 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002272 int type, unsigned long len,
2273 unsigned long flags, int pc)
2274{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002275 struct ring_buffer_event *entry;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002276 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002277
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002278 *current_rb = trace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002279
Tom Zanussi00b41452018-01-15 20:51:39 -06002280 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002281 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2282 (entry = this_cpu_read(trace_buffered_event))) {
2283 /* Try to use the per cpu buffer first */
2284 val = this_cpu_inc_return(trace_buffered_event_cnt);
2285 if (val == 1) {
2286 trace_event_setup(entry, type, flags, pc);
2287 entry->array[0] = len;
2288 return entry;
2289 }
2290 this_cpu_dec(trace_buffered_event_cnt);
2291 }
2292
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002293 entry = __trace_buffer_lock_reserve(*current_rb,
2294 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002295 /*
2296 * If tracing is off, but we have triggers enabled
2297 * we still need to look at the event data. Use the temp_buffer
2298	 * to store the trace event for the trigger to use. It's recursion
2299	 * safe and will not be recorded anywhere.
2300 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002301 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002302 *current_rb = temp_buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002303 entry = __trace_buffer_lock_reserve(*current_rb,
2304 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002305 }
2306 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002307}
2308EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2309
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002310static DEFINE_SPINLOCK(tracepoint_iter_lock);
2311static DEFINE_MUTEX(tracepoint_printk_mutex);
2312
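/*
 * Mirror a trace event to printk; only used when the "tp_printk" boot
 * option (the tracepoint_printk sysctl) is set.
 */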
2313static void output_printk(struct trace_event_buffer *fbuffer)
2314{
2315 struct trace_event_call *event_call;
2316 struct trace_event *event;
2317 unsigned long flags;
2318 struct trace_iterator *iter = tracepoint_print_iter;
2319
2320 /* We should never get here if iter is NULL */
2321 if (WARN_ON_ONCE(!iter))
2322 return;
2323
2324 event_call = fbuffer->trace_file->event_call;
2325 if (!event_call || !event_call->event.funcs ||
2326 !event_call->event.funcs->trace)
2327 return;
2328
2329 event = &fbuffer->trace_file->event_call->event;
2330
2331 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2332 trace_seq_init(&iter->seq);
2333 iter->ent = fbuffer->entry;
2334 event_call->event.funcs->trace(iter, 0, event);
2335 trace_seq_putc(&iter->seq, 0);
2336 printk("%s", iter->seq.buffer);
2337
2338 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2339}
2340
2341int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2342 void __user *buffer, size_t *lenp,
2343 loff_t *ppos)
2344{
2345 int save_tracepoint_printk;
2346 int ret;
2347
2348 mutex_lock(&tracepoint_printk_mutex);
2349 save_tracepoint_printk = tracepoint_printk;
2350
2351 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2352
2353 /*
2354 * This will force exiting early, as tracepoint_printk
2355	 * is always zero when tracepoint_print_iter is not allocated.
2356 */
2357 if (!tracepoint_print_iter)
2358 tracepoint_printk = 0;
2359
2360 if (save_tracepoint_printk == tracepoint_printk)
2361 goto out;
2362
2363 if (tracepoint_printk)
2364 static_key_enable(&tracepoint_printk_key.key);
2365 else
2366 static_key_disable(&tracepoint_printk_key.key);
2367
2368 out:
2369 mutex_unlock(&tracepoint_printk_mutex);
2370
2371 return ret;
2372}
2373
2374void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2375{
2376 if (static_key_false(&tracepoint_printk_key.key))
2377 output_printk(fbuffer);
2378
2379 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2380 fbuffer->event, fbuffer->entry,
2381 fbuffer->flags, fbuffer->pc);
2382}
2383EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2384
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002385/*
2386 * Skip 3:
2387 *
2388 * trace_buffer_unlock_commit_regs()
2389 * trace_event_buffer_commit()
2390 * trace_event_raw_event_xxx()
2391 */
2392# define STACK_SKIP 3
2393
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002394void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2395 struct ring_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002396 struct ring_buffer_event *event,
2397 unsigned long flags, int pc,
2398 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002399{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002400 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002401
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002402 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002403 * If regs is not set, then skip the necessary functions.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002404 * Note, we can still get here via blktrace, wakeup tracer
2405 * and mmiotrace, but that's ok if they lose a function or
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002406 * two. They are not that meaningful.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002407 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002408 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002409 ftrace_trace_userstack(buffer, flags, pc);
2410}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002411
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002412/*
2413 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2414 */
2415void
2416trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2417 struct ring_buffer_event *event)
2418{
2419 __buffer_unlock_commit(buffer, event);
2420}
2421
Chunyan Zhang478409d2016-11-21 15:57:18 +08002422static void
2423trace_process_export(struct trace_export *export,
2424 struct ring_buffer_event *event)
2425{
2426 struct trace_entry *entry;
2427 unsigned int size = 0;
2428
2429 entry = ring_buffer_event_data(event);
2430 size = ring_buffer_event_length(event);
Felipe Balbia773d412017-06-02 13:20:25 +03002431 export->write(export, entry, size);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002432}
2433
2434static DEFINE_MUTEX(ftrace_export_lock);
2435
2436static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2437
2438static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2439
2440static inline void ftrace_exports_enable(void)
2441{
2442 static_branch_enable(&ftrace_exports_enabled);
2443}
2444
2445static inline void ftrace_exports_disable(void)
2446{
2447 static_branch_disable(&ftrace_exports_enabled);
2448}
2449
2450void ftrace_exports(struct ring_buffer_event *event)
2451{
2452 struct trace_export *export;
2453
2454 preempt_disable_notrace();
2455
2456 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2457 while (export) {
2458 trace_process_export(export, event);
2459 export = rcu_dereference_raw_notrace(export->next);
2460 }
2461
2462 preempt_enable_notrace();
2463}
2464
2465static inline void
2466add_trace_export(struct trace_export **list, struct trace_export *export)
2467{
2468 rcu_assign_pointer(export->next, *list);
2469 /*
2470 * We are entering export into the list but another
2471 * CPU might be walking that list. We need to make sure
2472 * the export->next pointer is valid before another CPU sees
2473	 * the export pointer included in the list.
2474 */
2475 rcu_assign_pointer(*list, export);
2476}
2477
2478static inline int
2479rm_trace_export(struct trace_export **list, struct trace_export *export)
2480{
2481 struct trace_export **p;
2482
2483 for (p = list; *p != NULL; p = &(*p)->next)
2484 if (*p == export)
2485 break;
2486
2487 if (*p != export)
2488 return -1;
2489
2490 rcu_assign_pointer(*p, (*p)->next);
2491
2492 return 0;
2493}
2494
2495static inline void
2496add_ftrace_export(struct trace_export **list, struct trace_export *export)
2497{
2498 if (*list == NULL)
2499 ftrace_exports_enable();
2500
2501 add_trace_export(list, export);
2502}
2503
2504static inline int
2505rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2506{
2507 int ret;
2508
2509 ret = rm_trace_export(list, export);
2510 if (*list == NULL)
2511 ftrace_exports_disable();
2512
2513 return ret;
2514}
2515
2516int register_ftrace_export(struct trace_export *export)
2517{
2518 if (WARN_ON_ONCE(!export->write))
2519 return -1;
2520
2521 mutex_lock(&ftrace_export_lock);
2522
2523 add_ftrace_export(&ftrace_exports_list, export);
2524
2525 mutex_unlock(&ftrace_export_lock);
2526
2527 return 0;
2528}
2529EXPORT_SYMBOL_GPL(register_ftrace_export);
2530
2531int unregister_ftrace_export(struct trace_export *export)
2532{
2533 int ret;
2534
2535 mutex_lock(&ftrace_export_lock);
2536
2537 ret = rm_ftrace_export(&ftrace_exports_list, export);
2538
2539 mutex_unlock(&ftrace_export_lock);
2540
2541 return ret;
2542}
2543EXPORT_SYMBOL_GPL(unregister_ftrace_export);
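
/*
 * Illustrative sketch only (not part of this file): a minimal user of
 * the export hooks above. The callback signature follows struct
 * trace_export in <linux/trace.h>; the "my_export" names are made up.
 */
#if 0
static void my_export_write(struct trace_export *export, const void *entry,
			    unsigned int size)
{
	/* push @size bytes of the raw trace entry to an external sink */
}

static struct trace_export my_export = {
	.write	= my_export_write,
};

/* pair register_ftrace_export(&my_export) with unregister_ftrace_export() */
#endif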
2544
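/*
 * trace_function - record a single function-trace entry into @tr.
 * The reserve / fill / commit sequence below is the canonical way an
 * event lands in the ring buffer, with the filter check deciding
 * whether the reserved event is committed or discarded.
 */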
Ingo Molnare309b412008-05-12 21:20:51 +02002545void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002546trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04002547 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2548 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002549{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002550 struct trace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002551 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002552 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002553 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002554
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002555 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2556 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002557 if (!event)
2558 return;
2559 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002560 entry->ip = ip;
2561 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002562
Chunyan Zhang478409d2016-11-21 15:57:18 +08002563 if (!call_filter_check_discard(call, entry, buffer, event)) {
2564 if (static_branch_unlikely(&ftrace_exports_enabled))
2565 ftrace_exports(event);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002566 __buffer_unlock_commit(buffer, event);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002567 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002568}
2569
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002570#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002571
2572#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2573struct ftrace_stack {
2574 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2575};
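/* e.g. 4K pages and 8-byte longs give 512 saved calls per CPU */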
2576
2577static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2578static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2579
Steven Rostedte77405a2009-09-02 14:17:06 -04002580static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05002581 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002582 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02002583{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002584 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002585 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002586 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02002587 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002588 int use_stack;
2589 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02002590
2591 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02002592 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02002593
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002594 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002595	 * Add one, for this function and the call to save_stack_trace().
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002596 * If regs is set, then these functions will not be in the way.
2597 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002598#ifndef CONFIG_UNWINDER_ORC
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002599 if (!regs)
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002600 trace.skip++;
2601#endif
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002602
2603 /*
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002604	 * Since events can happen in NMIs, there's no safe way to
2605	 * use the per-cpu ftrace_stacks. We reserve it and if an interrupt
2606 * or NMI comes in, it will just have to use the default
2607 * FTRACE_STACK_SIZE.
2608 */
2609 preempt_disable_notrace();
2610
Shan Wei82146522012-11-19 13:21:01 +08002611 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002612 /*
2613 * We don't need any atomic variables, just a barrier.
2614 * If an interrupt comes in, we don't care, because it would
2615 * have exited and put the counter back to what we want.
2616 * We just need a barrier to keep gcc from moving things
2617 * around.
2618 */
2619 barrier();
2620 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05002621 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002622 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2623
2624 if (regs)
2625 save_stack_trace_regs(regs, &trace);
2626 else
2627 save_stack_trace(&trace);
2628
2629 if (trace.nr_entries > size)
2630 size = trace.nr_entries;
2631 } else
2632 /* From now on, use_stack is a boolean */
2633 use_stack = 0;
2634
2635 size *= sizeof(unsigned long);
2636
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002637 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2638 sizeof(*entry) + size, flags, pc);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002639 if (!event)
2640 goto out;
2641 entry = ring_buffer_event_data(event);
2642
2643 memset(&entry->caller, 0, size);
2644
2645 if (use_stack)
2646 memcpy(&entry->caller, trace.entries,
2647 trace.nr_entries * sizeof(unsigned long));
2648 else {
2649 trace.max_entries = FTRACE_STACK_ENTRIES;
2650 trace.entries = entry->caller;
2651 if (regs)
2652 save_stack_trace_regs(regs, &trace);
2653 else
2654 save_stack_trace(&trace);
2655 }
2656
2657 entry->size = trace.nr_entries;
2658
Tom Zanussif306cc82013-10-24 08:34:17 -05002659 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002660 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002661
2662 out:
2663 /* Again, don't let gcc optimize things here */
2664 barrier();
Shan Wei82146522012-11-19 13:21:01 +08002665 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002666 preempt_enable_notrace();
2667
Ingo Molnarf0a920d2008-05-12 21:20:47 +02002668}
2669
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002670static inline void ftrace_trace_stack(struct trace_array *tr,
2671 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002672 unsigned long flags,
2673 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05002674{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002675 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05002676 return;
2677
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002678 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05002679}
2680
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002681void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2682 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04002683{
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04002684 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2685
2686 if (rcu_is_watching()) {
2687 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2688 return;
2689 }
2690
2691 /*
2692 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2693 * but if the above rcu_is_watching() failed, then the NMI
2694 * triggered someplace critical, and rcu_irq_enter() should
2695 * not be called from NMI.
2696 */
2697 if (unlikely(in_nmi()))
2698 return;
2699
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04002700 rcu_irq_enter_irqson();
2701 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2702 rcu_irq_exit_irqson();
Steven Rostedt38697052008-10-01 13:14:09 -04002703}
2704
Steven Rostedt03889382009-12-11 09:48:22 -05002705/**
2706 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002707 * @skip: Number of functions to skip (helper handlers)
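 *
 * e.g. trace_dump_stack(0) records a backtrace starting at the caller.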
Steven Rostedt03889382009-12-11 09:48:22 -05002708 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002709void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05002710{
2711 unsigned long flags;
2712
2713 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05002714 return;
Steven Rostedt03889382009-12-11 09:48:22 -05002715
2716 local_save_flags(flags);
2717
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002718#ifndef CONFIG_UNWINDER_ORC
2719 /* Skip 1 to skip this function. */
2720 skip++;
2721#endif
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002722 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2723 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05002724}
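/*
 * Usage sketch (editorial): dropping
 *
 *	trace_dump_stack(0);
 *
 * into a suspect code path records the current backtrace into the
 * global trace buffer, a quieter alternative to dumping it on the
 * console with dump_stack().
 */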
2725
Steven Rostedt91e86e52010-11-10 12:56:12 +01002726static DEFINE_PER_CPU(int, user_stack_count);
2727
Steven Rostedte77405a2009-09-02 14:17:06 -04002728void
2729ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02002730{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002731 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02002732 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02002733 struct userstack_entry *entry;
2734 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02002735
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002736 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02002737 return;
2738
Steven Rostedtb6345872010-03-12 20:03:30 -05002739 /*
 2740	 * NMIs can not handle page faults, even with fixups.
 2741	 * Saving the user stack can (and often does) fault.
2742 */
2743 if (unlikely(in_nmi()))
2744 return;
2745
Steven Rostedt91e86e52010-11-10 12:56:12 +01002746 /*
2747 * prevent recursion, since the user stack tracing may
2748 * trigger other kernel events.
2749 */
2750 preempt_disable();
2751 if (__this_cpu_read(user_stack_count))
2752 goto out;
2753
2754 __this_cpu_inc(user_stack_count);
2755
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002756 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2757 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02002758 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08002759 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02002760 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02002761
Steven Rostedt48659d32009-09-11 11:36:23 -04002762 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02002763 memset(&entry->caller, 0, sizeof(entry->caller));
2764
2765 trace.nr_entries = 0;
2766 trace.max_entries = FTRACE_STACK_ENTRIES;
2767 trace.skip = 0;
2768 trace.entries = entry->caller;
2769
2770 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05002771 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002772 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002773
Li Zefan1dbd1952010-12-09 15:47:56 +08002774 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01002775 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002776 out:
2777 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02002778}
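/*
 * Editorial note: user_stack_count above is a per-cpu recursion
 * latch. The reusable shape, for any handler whose work can re-enter
 * itself through the events it records, is:
 *
 *	preempt_disable();
 *	if (__this_cpu_read(depth))
 *		goto out;
 *	__this_cpu_inc(depth);
 *	... work that may recurse ...
 *	__this_cpu_dec(depth);
 * out:
 *	preempt_enable();
 */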
2779
Hannes Eder4fd27352009-02-10 19:44:12 +01002780#ifdef UNUSED
2781static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02002782{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002783 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02002784}
Hannes Eder4fd27352009-02-10 19:44:12 +01002785#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02002786
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002787#endif /* CONFIG_STACKTRACE */
2788
Steven Rostedt07d777f2011-09-22 14:01:55 -04002789/* created for use with alloc_percpu */
2790struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002791 int nesting;
2792 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04002793};
2794
2795static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002796
2797/*
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002798 * This allows for lockless recording. If we're nested too deeply, then
2799 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04002800 */
2801static char *get_trace_buf(void)
2802{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002803 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002804
	/* Four nesting levels, matching buffer[4]: e.g. task, softirq, hardirq, NMI */
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002805	if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002806 return NULL;
2807
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04002808 buffer->nesting++;
2809
2810 /* Interrupts must see nesting incremented before we use the buffer */
2811 barrier();
2812 return &buffer->buffer[buffer->nesting][0];
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002813}
2814
2815static void put_trace_buf(void)
2816{
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04002817 /* Don't let the decrement of nesting leak before this */
2818 barrier();
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002819 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002820}
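/*
 * Pairing sketch (illustrative): every successful get_trace_buf()
 * must be matched by a put_trace_buf() on the same CPU, so callers
 * bracket the pair with preemption disabled, as trace_vbprintk()
 * below does:
 *
 *	preempt_disable_notrace();
 *	tbuffer = get_trace_buf();
 *	if (tbuffer) {
 *		... format at most TRACE_BUF_SIZE bytes into tbuffer ...
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 */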
2821
2822static int alloc_percpu_trace_buffer(void)
2823{
2824 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002825
2826 buffers = alloc_percpu(struct trace_buffer_struct);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002827 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2828 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002829
2830 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002831 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002832}
2833
Steven Rostedt81698832012-10-11 10:15:05 -04002834static int buffers_allocated;
2835
Steven Rostedt07d777f2011-09-22 14:01:55 -04002836void trace_printk_init_buffers(void)
2837{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002838 if (buffers_allocated)
2839 return;
2840
2841 if (alloc_percpu_trace_buffer())
2842 return;
2843
Steven Rostedt2184db42014-05-28 13:14:40 -04002844 /* trace_printk() is for debug use only. Don't use it in production. */
2845
Joe Perchesa395d6a2016-03-22 14:28:09 -07002846 pr_warn("\n");
2847 pr_warn("**********************************************************\n");
2848 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2849 pr_warn("** **\n");
2850 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2851 pr_warn("** **\n");
2852 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2853 pr_warn("** unsafe for production use. **\n");
2854 pr_warn("** **\n");
2855 pr_warn("** If you see this message and you are not debugging **\n");
2856 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2857 pr_warn("** **\n");
2858 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2859 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002860
Steven Rostedtb382ede62012-10-10 21:44:34 -04002861 /* Expand the buffers to set size */
2862 tracing_update_buffers();
2863
Steven Rostedt07d777f2011-09-22 14:01:55 -04002864 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002865
2866 /*
2867 * trace_printk_init_buffers() can be called by modules.
2868 * If that happens, then we need to start cmdline recording
 2869	 * directly here. If global_trace.trace_buffer.buffer is already
 2870	 * allocated at this point, then this was called by module code.
2871 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002872 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002873 tracing_start_cmdline_record();
2874}
2875
2876void trace_printk_start_comm(void)
2877{
2878 /* Start tracing comms if trace printk is set */
2879 if (!buffers_allocated)
2880 return;
2881 tracing_start_cmdline_record();
2882}
2883
2884static void trace_printk_start_stop_comm(int enabled)
2885{
2886 if (!buffers_allocated)
2887 return;
2888
2889 if (enabled)
2890 tracing_start_cmdline_record();
2891 else
2892 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002893}
2894
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002895/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002896 * trace_vbprintk - write binary msg to tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002897 * @ip: The address of the caller
 * @fmt: The binary printf format string
 * @args: Arguments for @fmt
 2898 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002899int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002900{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002901 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002902 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002903 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002904 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002905 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002906 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002907 char *tbuffer;
2908 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002909
2910 if (unlikely(tracing_selftest_running || tracing_disabled))
2911 return 0;
2912
2913 /* Don't pollute graph traces with trace_vprintk internals */
2914 pause_graph_tracing();
2915
2916 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002917 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002918
Steven Rostedt07d777f2011-09-22 14:01:55 -04002919 tbuffer = get_trace_buf();
2920 if (!tbuffer) {
2921 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002922 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002923 }
2924
2925 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2926
2927 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002928 goto out;
2929
Steven Rostedt07d777f2011-09-22 14:01:55 -04002930 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002931 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002932 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002933 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2934 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002935 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002936 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002937 entry = ring_buffer_event_data(event);
2938 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002939 entry->fmt = fmt;
2940
Steven Rostedt07d777f2011-09-22 14:01:55 -04002941 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002942 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002943 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002944 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05002945 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002946
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002947out:
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002948 put_trace_buf();
2949
2950out_nobuffer:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002951 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002952 unpause_graph_tracing();
2953
2954 return len;
2955}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002956EXPORT_SYMBOL_GPL(trace_vbprintk);
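/*
 * Editorial note on the call chain: a trace_printk("x=%d\n", x) with
 * a constant format compiles into a bprintk call that arrives here,
 * so only the format pointer and the binary arguments (a single u32
 * in that example) are copied into the ring buffer; the string is
 * rendered at read time from entry->fmt.
 */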
2957
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002958static int
2959__trace_array_vprintk(struct ring_buffer *buffer,
2960 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002961{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002962 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002963 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002964 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002965 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002966 unsigned long flags;
2967 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002968
2969 if (tracing_disabled || tracing_selftest_running)
2970 return 0;
2971
Steven Rostedt07d777f2011-09-22 14:01:55 -04002972 /* Don't pollute graph traces with trace_vprintk internals */
2973 pause_graph_tracing();
2974
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002975 pc = preempt_count();
2976 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002977
2979 tbuffer = get_trace_buf();
2980 if (!tbuffer) {
2981 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002982 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002983 }
2984
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002985 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002986
Steven Rostedt07d777f2011-09-22 14:01:55 -04002987 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002988 size = sizeof(*entry) + len + 1;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002989 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2990 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002991 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002992 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002993 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002994 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002995
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002996 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05002997 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002998 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002999 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003000 }
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003001
3002out:
3003 put_trace_buf();
3004
3005out_nobuffer:
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003006 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003007 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003008
3009 return len;
3010}
Steven Rostedt659372d2009-09-03 19:11:07 -04003011
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003012int trace_array_vprintk(struct trace_array *tr,
3013 unsigned long ip, const char *fmt, va_list args)
3014{
3015 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3016}
3017
3018int trace_array_printk(struct trace_array *tr,
3019 unsigned long ip, const char *fmt, ...)
3020{
3021 int ret;
3022 va_list ap;
3023
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003024 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003025 return 0;
3026
3027 va_start(ap, fmt);
3028 ret = trace_array_vprintk(tr, ip, fmt, ap);
3029 va_end(ap);
3030 return ret;
3031}
3032
3033int trace_array_printk_buf(struct ring_buffer *buffer,
3034 unsigned long ip, const char *fmt, ...)
3035{
3036 int ret;
3037 va_list ap;
3038
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003039 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003040 return 0;
3041
3042 va_start(ap, fmt);
3043 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3044 va_end(ap);
3045 return ret;
3046}
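/*
 * Usage sketch (illustrative): code holding its own trace instance
 * can log into that instance's buffer instead of the global one:
 *
 *	trace_array_printk(tr, _THIS_IP_, "reset count=%d\n", cnt);
 *
 * where tr is the instance's struct trace_array and _THIS_IP_
 * supplies the caller address stored in entry->ip.
 */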
3047
Steven Rostedt659372d2009-09-03 19:11:07 -04003048int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3049{
Steven Rostedta813a152009-10-09 01:41:35 -04003050 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04003051}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003052EXPORT_SYMBOL_GPL(trace_vprintk);
3053
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003054static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04003055{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003056 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3057
Steven Rostedt5a90f572008-09-03 17:42:51 -04003058 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003059 if (buf_iter)
3060 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04003061}
3062
Ingo Molnare309b412008-05-12 21:20:51 +02003063static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003064peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3065 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003066{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003067 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003068 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003069
Steven Rostedtd7690412008-10-01 00:29:53 -04003070 if (buf_iter)
3071 event = ring_buffer_iter_peek(buf_iter, ts);
3072 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003073 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003074 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04003075
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003076 if (event) {
3077 iter->ent_size = ring_buffer_event_length(event);
3078 return ring_buffer_event_data(event);
3079 }
3080 iter->ent_size = 0;
3081 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003082}
Steven Rostedtd7690412008-10-01 00:29:53 -04003083
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003084static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003085__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3086 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003087{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003088 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003089 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08003090 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003091 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003092 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003093 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003094 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003095 int cpu;
3096
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003097 /*
 3098	 * If we are in a per_cpu trace file, don't bother iterating over
 3099	 * all CPUs; peek at that one directly.
3100 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003101 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003102 if (ring_buffer_empty_cpu(buffer, cpu_file))
3103 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003104 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003105 if (ent_cpu)
3106 *ent_cpu = cpu_file;
3107
3108 return ent;
3109 }
3110
Steven Rostedtab464282008-05-12 21:21:00 +02003111 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003112
3113 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003114 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003115
Steven Rostedtbc21b472010-03-31 19:49:26 -04003116 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003117
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02003118 /*
3119 * Pick the entry with the smallest timestamp:
3120 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003121 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003122 next = ent;
3123 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003124 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003125 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003126 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003127 }
3128 }
3129
Steven Rostedt12b5da32012-03-27 10:43:28 -04003130 iter->ent_size = next_size;
3131
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003132 if (ent_cpu)
3133 *ent_cpu = next_cpu;
3134
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003135 if (ent_ts)
3136 *ent_ts = next_ts;
3137
Steven Rostedtbc21b472010-03-31 19:49:26 -04003138 if (missing_events)
3139 *missing_events = next_lost;
3140
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003141 return next;
3142}
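/*
 * Editorial note: the loop above is one step of a k-way merge over
 * the per-cpu buffers, keyed on timestamp. With two CPUs holding
 * entries stamped
 *
 *	cpu0: 105, 230		cpu1: 110, 120
 *
 * successive calls return 105 (cpu0), 110 (cpu1), 120 (cpu1), then
 * 230 (cpu0), so readers see a single time-ordered stream.
 */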
3143
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003144/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003145struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3146 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003147{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003148 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003149}
Ingo Molnar8c523a92008-05-12 21:20:46 +02003150
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003151/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05003152void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003153{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003154 iter->ent = __find_next_entry(iter, &iter->cpu,
3155 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003156
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003157 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003158 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003159
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003160 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003161}
3162
Ingo Molnare309b412008-05-12 21:20:51 +02003163static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003164{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003165 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003166 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003167}
3168
Ingo Molnare309b412008-05-12 21:20:51 +02003169static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003170{
3171 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003172 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003173 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003174
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003175 WARN_ON_ONCE(iter->leftover);
3176
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003177 (*pos)++;
3178
3179 /* can't go backwards */
3180 if (iter->idx > i)
3181 return NULL;
3182
3183 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05003184 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003185 else
3186 ent = iter;
3187
3188 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05003189 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003190
3191 iter->pos = *pos;
3192
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003193 return ent;
3194}
3195
Jason Wessel955b61e2010-08-05 09:22:23 -05003196void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003197{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003198 struct ring_buffer_event *event;
3199 struct ring_buffer_iter *buf_iter;
3200 unsigned long entries = 0;
3201 u64 ts;
3202
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003203 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003204
Steven Rostedt6d158a82012-06-27 20:46:14 -04003205 buf_iter = trace_buffer_iter(iter, cpu);
3206 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003207 return;
3208
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003209 ring_buffer_iter_reset(buf_iter);
3210
3211 /*
 3212	 * With the max latency tracers, a reset may never have taken
 3213	 * place on a cpu. This is evident from entries whose timestamps
 3214	 * precede the start of the buffer.
3215 */
3216 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003217 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003218 break;
3219 entries++;
3220 ring_buffer_read(buf_iter, NULL);
3221 }
3222
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003223 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003224}
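/*
 * Worked example (editorial): if a max-latency buffer swap left cpu 2
 * holding 7 entries stamped before time_start, the loop above skips
 * them and records skipped_entries = 7, letting get_total_entries()
 * below report entry counts without the stale records.
 */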
3225
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003226/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003227 * The current tracer is copied to avoid holding a global lock
 3228 * all around.
3229 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003230static void *s_start(struct seq_file *m, loff_t *pos)
3231{
3232 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003233 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003234 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003235 void *p = NULL;
3236 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003237 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003238
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09003239 /*
3240 * copy the tracer to avoid using a global lock all around.
3241 * iter->trace is a copy of current_trace, the pointer to the
3242 * name may be used instead of a strcmp(), as iter->trace->name
3243 * will point to the same string as current_trace->name.
3244 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003245 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003246 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3247 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003248 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003249
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003250#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003251 if (iter->snapshot && iter->trace->use_max_tr)
3252 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003253#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003254
3255 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003256 atomic_inc(&trace_record_taskinfo_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003257
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003258 if (*pos != iter->pos) {
3259 iter->ent = NULL;
3260 iter->cpu = 0;
3261 iter->idx = -1;
3262
Steven Rostedtae3b5092013-01-23 15:22:59 -05003263 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003264 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003265 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003266 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003267 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003268
Lai Jiangshanac91d852010-03-02 17:54:50 +08003269 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003270 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3271 ;
3272
3273 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003274 /*
3275 * If we overflowed the seq_file before, then we want
3276 * to just reuse the trace_seq buffer again.
3277 */
3278 if (iter->leftover)
3279 p = iter;
3280 else {
3281 l = *pos - 1;
3282 p = s_next(m, p, &l);
3283 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003284 }
3285
Lai Jiangshan4f535962009-05-18 19:35:34 +08003286 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003287 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003288 return p;
3289}
3290
3291static void s_stop(struct seq_file *m, void *p)
3292{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003293 struct trace_iterator *iter = m->private;
3294
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003295#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003296 if (iter->snapshot && iter->trace->use_max_tr)
3297 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003298#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003299
3300 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003301 atomic_dec(&trace_record_taskinfo_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003302
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003303 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08003304 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003305}
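/*
 * Lifecycle sketch (editorial): a read of the trace file drives these
 * hooks as
 *
 *	s_start() -> s_show()/s_next() ... -> s_stop()
 *
 * with s_start() taking trace_access_lock(cpu_file) and s_stop()
 * releasing it, so no lock stays held between seq_file chunks.
 */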
3306
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003307static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003308get_total_entries(struct trace_buffer *buf,
3309 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003310{
3311 unsigned long count;
3312 int cpu;
3313
3314 *total = 0;
3315 *entries = 0;
3316
3317 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003318 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003319 /*
3320 * If this buffer has skipped entries, then we hold all
3321 * entries for the trace and we need to ignore the
3322 * ones before the time stamp.
3323 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003324 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3325 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003326 /* total is the same as the entries */
3327 *total += count;
3328 } else
3329 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003330 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003331 *entries += count;
3332 }
3333}
3334
Ingo Molnare309b412008-05-12 21:20:51 +02003335static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003336{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003337 seq_puts(m, "# _------=> CPU# \n"
3338 "# / _-----=> irqs-off \n"
3339 "# | / _----=> need-resched \n"
3340 "# || / _---=> hardirq/softirq \n"
3341 "# ||| / _--=> preempt-depth \n"
3342 "# |||| / delay \n"
3343 "# cmd pid ||||| time | caller \n"
3344 "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003345}
3346
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003347static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003348{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003349 unsigned long total;
3350 unsigned long entries;
3351
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003352 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003353 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3354 entries, total, num_online_cpus());
3355 seq_puts(m, "#\n");
3356}
3357
Joel Fernandes441dae82017-06-25 22:38:43 -07003358static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3359 unsigned int flags)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003360{
Joel Fernandes441dae82017-06-25 22:38:43 -07003361 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3362
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003363 print_event_info(buf, m);
Joel Fernandes441dae82017-06-25 22:38:43 -07003364
3365 seq_printf(m, "# TASK-PID CPU# %s TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3366 seq_printf(m, "# | | | %s | |\n", tgid ? " | " : "");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003367}
3368
Joel Fernandes441dae82017-06-25 22:38:43 -07003369static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3370 unsigned int flags)
Steven Rostedt77271ce2011-11-17 09:34:33 -05003371{
Joel Fernandes441dae82017-06-25 22:38:43 -07003372 bool tgid = flags & TRACE_ITER_RECORD_TGID;
Steven Rostedt (VMware)b11fb732017-07-11 15:43:24 -04003373 const char tgid_space[] = " ";
3374 const char space[] = " ";
Joel Fernandes441dae82017-06-25 22:38:43 -07003375
Steven Rostedt (VMware)b11fb732017-07-11 15:43:24 -04003376 seq_printf(m, "# %s _-----=> irqs-off\n",
3377 tgid ? tgid_space : space);
3378 seq_printf(m, "# %s / _----=> need-resched\n",
3379 tgid ? tgid_space : space);
3380 seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
3381 tgid ? tgid_space : space);
3382 seq_printf(m, "# %s|| / _--=> preempt-depth\n",
3383 tgid ? tgid_space : space);
3384 seq_printf(m, "# %s||| / delay\n",
3385 tgid ? tgid_space : space);
3386 seq_printf(m, "# TASK-PID CPU#%s|||| TIMESTAMP FUNCTION\n",
3387 tgid ? " TGID " : space);
3388 seq_printf(m, "# | | | %s|||| | |\n",
3389 tgid ? " | " : space);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003390}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003391
Jiri Olsa62b915f2010-04-02 19:01:22 +02003392void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003393print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3394{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003395 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003396 struct trace_buffer *buf = iter->trace_buffer;
3397 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003398 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003399 unsigned long entries;
3400 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003401 const char *name = "preemption";
3402
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05003403 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003404
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003405 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003406
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003407 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003408 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003409 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003410 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003411 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003412 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02003413 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003414 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02003415 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003416 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003417#if defined(CONFIG_PREEMPT_NONE)
3418 "server",
3419#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3420 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04003421#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003422 "preempt",
3423#else
3424 "unknown",
3425#endif
3426 /* These are reserved for later use */
3427 0, 0, 0, 0);
3428#ifdef CONFIG_SMP
3429 seq_printf(m, " #P:%d)\n", num_online_cpus());
3430#else
3431 seq_puts(m, ")\n");
3432#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003433 seq_puts(m, "# -----------------\n");
3434 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003435 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07003436 data->comm, data->pid,
3437 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003438 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003439 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003440
3441 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003442 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003443 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3444 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003445 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003446 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3447 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04003448 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003449 }
3450
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003451 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003452}
3453
Steven Rostedta3097202008-11-07 22:36:02 -05003454static void test_cpu_buff_start(struct trace_iterator *iter)
3455{
3456 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003457 struct trace_array *tr = iter->tr;
Steven Rostedta3097202008-11-07 22:36:02 -05003458
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003459 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003460 return;
3461
3462 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3463 return;
3464
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003465 if (cpumask_available(iter->started) &&
3466 cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05003467 return;
3468
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003469 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003470 return;
3471
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003472 if (cpumask_available(iter->started))
Sasha Levin919cd972015-09-04 12:45:56 -04003473 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003474
3475 /* Don't print started cpu buffer for the first entry of the trace */
3476 if (iter->idx > 1)
3477 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3478 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05003479}
3480
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003481static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003482{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003483 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02003484 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003485 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003486 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003487 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003488
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003489 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003490
Steven Rostedta3097202008-11-07 22:36:02 -05003491 test_cpu_buff_start(iter);
3492
Steven Rostedtf633cef2008-12-23 23:24:13 -05003493 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003494
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003495 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003496 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3497 trace_print_lat_context(iter);
3498 else
3499 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003500 }
3501
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003502 if (trace_seq_has_overflowed(s))
3503 return TRACE_TYPE_PARTIAL_LINE;
3504
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003505 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003506 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003507
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003508 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003509
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003510 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003511}
3512
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003513static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003514{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003515 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003516 struct trace_seq *s = &iter->seq;
3517 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003518 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003519
3520 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003521
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003522 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003523 trace_seq_printf(s, "%d %d %llu ",
3524 entry->pid, iter->cpu, iter->ts);
3525
3526 if (trace_seq_has_overflowed(s))
3527 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003528
Steven Rostedtf633cef2008-12-23 23:24:13 -05003529 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003530 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003531 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003532
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003533 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003534
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003535 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003536}
3537
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003538static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003539{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003540 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003541 struct trace_seq *s = &iter->seq;
3542 unsigned char newline = '\n';
3543 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003544 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003545
3546 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003547
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003548 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003549 SEQ_PUT_HEX_FIELD(s, entry->pid);
3550 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3551 SEQ_PUT_HEX_FIELD(s, iter->ts);
3552 if (trace_seq_has_overflowed(s))
3553 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003554 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003555
Steven Rostedtf633cef2008-12-23 23:24:13 -05003556 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003557 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04003558 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003559 if (ret != TRACE_TYPE_HANDLED)
3560 return ret;
3561 }
Steven Rostedt7104f302008-10-01 10:52:51 -04003562
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003563 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003564
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003565 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003566}
3567
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003568static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003569{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003570 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003571 struct trace_seq *s = &iter->seq;
3572 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003573 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003574
3575 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003576
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003577 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003578 SEQ_PUT_FIELD(s, entry->pid);
3579 SEQ_PUT_FIELD(s, iter->cpu);
3580 SEQ_PUT_FIELD(s, iter->ts);
3581 if (trace_seq_has_overflowed(s))
3582 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003583 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003584
Steven Rostedtf633cef2008-12-23 23:24:13 -05003585 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04003586 return event ? event->funcs->binary(iter, 0, event) :
3587 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003588}
3589
Jiri Olsa62b915f2010-04-02 19:01:22 +02003590int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003591{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003592 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003593 int cpu;
3594
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003595 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003596 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003597 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003598 buf_iter = trace_buffer_iter(iter, cpu);
3599 if (buf_iter) {
3600 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003601 return 0;
3602 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003603 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003604 return 0;
3605 }
3606 return 1;
3607 }
3608
Steven Rostedtab464282008-05-12 21:21:00 +02003609 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04003610 buf_iter = trace_buffer_iter(iter, cpu);
3611 if (buf_iter) {
3612 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04003613 return 0;
3614 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003615 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04003616 return 0;
3617 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003618 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003619
Frederic Weisbecker797d3712008-09-30 18:13:45 +02003620 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003621}
3622
Lai Jiangshan4f535962009-05-18 19:35:34 +08003623/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05003624enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003625{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003626 struct trace_array *tr = iter->tr;
3627 unsigned long trace_flags = tr->trace_flags;
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003628 enum print_line_t ret;
3629
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003630 if (iter->lost_events) {
3631 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3632 iter->cpu, iter->lost_events);
3633 if (trace_seq_has_overflowed(&iter->seq))
3634 return TRACE_TYPE_PARTIAL_LINE;
3635 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04003636
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003637 if (iter->trace && iter->trace->print_line) {
3638 ret = iter->trace->print_line(iter);
3639 if (ret != TRACE_TYPE_UNHANDLED)
3640 return ret;
3641 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02003642
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05003643 if (iter->ent->type == TRACE_BPUTS &&
3644 trace_flags & TRACE_ITER_PRINTK &&
3645 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3646 return trace_print_bputs_msg_only(iter);
3647
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003648 if (iter->ent->type == TRACE_BPRINT &&
3649 trace_flags & TRACE_ITER_PRINTK &&
3650 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003651 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003652
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003653 if (iter->ent->type == TRACE_PRINT &&
3654 trace_flags & TRACE_ITER_PRINTK &&
3655 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003656 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003657
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003658 if (trace_flags & TRACE_ITER_BIN)
3659 return print_bin_fmt(iter);
3660
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003661 if (trace_flags & TRACE_ITER_HEX)
3662 return print_hex_fmt(iter);
3663
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003664 if (trace_flags & TRACE_ITER_RAW)
3665 return print_raw_fmt(iter);
3666
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003667 return print_trace_fmt(iter);
3668}
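/*
 * Editorial summary of the dispatch order above: lost-event banner
 * first, then the tracer's own print_line(), the printk-msg-only
 * cases, the bin/hex/raw option flags, and finally the default
 * print_trace_fmt(). E.g. after "echo raw > trace_options", every
 * entry the current tracer does not claim is printed by
 * print_raw_fmt().
 */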
3669
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003670void trace_latency_header(struct seq_file *m)
3671{
3672 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003673 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003674
3675 /* print nothing if the buffers are empty */
3676 if (trace_empty(iter))
3677 return;
3678
3679 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3680 print_trace_header(m, iter);
3681
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003682 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003683 print_lat_help_header(m);
3684}
3685
Jiri Olsa62b915f2010-04-02 19:01:22 +02003686void trace_default_header(struct seq_file *m)
3687{
3688 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003689 struct trace_array *tr = iter->tr;
3690 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02003691
Jiri Olsaf56e7f82011-06-03 16:58:49 +02003692 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3693 return;
3694
Jiri Olsa62b915f2010-04-02 19:01:22 +02003695 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3696 /* print nothing if the buffers are empty */
3697 if (trace_empty(iter))
3698 return;
3699 print_trace_header(m, iter);
3700 if (!(trace_flags & TRACE_ITER_VERBOSE))
3701 print_lat_help_header(m);
3702 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05003703 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3704 if (trace_flags & TRACE_ITER_IRQ_INFO)
Joel Fernandes441dae82017-06-25 22:38:43 -07003705 print_func_help_header_irq(iter->trace_buffer,
3706 m, trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003707 else
Joel Fernandes441dae82017-06-25 22:38:43 -07003708 print_func_help_header(iter->trace_buffer, m,
3709 trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003710 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02003711 }
3712}
3713
Steven Rostedte0a413f2011-09-29 21:26:16 -04003714static void test_ftrace_alive(struct seq_file *m)
3715{
3716 if (!ftrace_is_dead())
3717 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003718 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3719 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04003720}
3721
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003722#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003723static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003724{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003725 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3726 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3727 "# Takes a snapshot of the main buffer.\n"
3728 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3729 "# (Doesn't have to be '2' works with any number that\n"
3730 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003731}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003732
3733static void show_snapshot_percpu_help(struct seq_file *m)
3734{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003735 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003736#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003737 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3738 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003739#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003740 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3741 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003742#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003743 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3744 "# (Doesn't have to be '2' works with any number that\n"
3745 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003746}
3747
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003748static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3749{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05003750 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003751 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003752 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003753 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003754
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003755 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003756 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3757 show_snapshot_main_help(m);
3758 else
3759 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003760}
3761#else
3762/* Should never be called */
3763static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3764#endif
3765
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003766static int s_show(struct seq_file *m, void *v)
3767{
3768 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003769 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003770
3771 if (iter->ent == NULL) {
3772 if (iter->tr) {
3773 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3774 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04003775 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003776 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003777 if (iter->snapshot && trace_empty(iter))
3778 print_snapshot_help(m, iter);
3779 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003780 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02003781 else
3782 trace_default_header(m);
3783
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003784 } else if (iter->leftover) {
3785 /*
3786 * If we filled the seq_file buffer earlier, we
3787 * want to just show it now.
3788 */
3789 ret = trace_print_seq(m, &iter->seq);
3790
3791 /* ret should this time be zero, but you never know */
3792 iter->leftover = ret;
3793
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003794 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003795 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003796 ret = trace_print_seq(m, &iter->seq);
3797 /*
3798 * If we overflow the seq_file buffer, then it will
3799 * ask us for this data again at start up.
3800 * Use that instead.
3801 * ret is 0 if seq_file write succeeded.
3802 * -1 otherwise.
3803 */
3804 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003805 }
3806
3807 return 0;
3808}
3809
Oleg Nesterov649e9c702013-07-23 17:25:54 +02003810/*
3811 * Should be used after trace_array_get(); trace_types_lock
3812 * ensures that i_cdev was already initialized.
3813 */
3814static inline int tracing_get_cpu(struct inode *inode)
3815{
3816 if (inode->i_cdev) /* See trace_create_cpu_file() */
3817 return (long)inode->i_cdev - 1;
3818 return RING_BUFFER_ALL_CPUS;
3819}
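
/*
 * For reference, the encoding decoded above is established when the
 * per-cpu files are created; trace_create_cpu_file() further down in
 * this file does roughly:
 *
 *	struct dentry *ret = trace_create_file(name, mode, parent,
 *					       data, fops);
 *
 *	if (ret) // See tracing_get_cpu()
 *		d_inode(ret)->i_cdev = (void *)(cpu + 1);
 *
 * Storing cpu + 1 keeps a NULL i_cdev (never initialized) distinct from
 * CPU 0, which is why a NULL i_cdev maps to RING_BUFFER_ALL_CPUS here.
 */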
3820
James Morris88e9d342009-09-22 16:43:43 -07003821static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003822 .start = s_start,
3823 .next = s_next,
3824 .stop = s_stop,
3825 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003826};
3827
Ingo Molnare309b412008-05-12 21:20:51 +02003828static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02003829__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003830{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003831 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003832 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02003833 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003834
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003835 if (tracing_disabled)
3836 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02003837
Jiri Olsa50e18b92012-04-25 10:23:39 +02003838 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003839 if (!iter)
3840 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003841
Gil Fruchter72917232015-06-09 10:32:35 +03003842 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
Steven Rostedt6d158a82012-06-27 20:46:14 -04003843 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003844 if (!iter->buffer_iter)
3845 goto release;
3846
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003847 /*
3848 * We make a copy of the current tracer to avoid concurrent
3849 * changes on it while we are reading.
3850 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003851 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003852 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003853 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003854 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003855
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003856 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003857
Li Zefan79f55992009-06-15 14:58:26 +08003858 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003859 goto fail;
3860
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003861 iter->tr = tr;
3862
3863#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003864 /* Currently only the top directory has a snapshot */
3865 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003866 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003867 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003868#endif
3869 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003870 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003871 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02003872 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003873 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003874
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003875 /* Notify the tracer early; before we stop tracing. */
3876 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01003877 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003878
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003879 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003880 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003881 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3882
David Sharp8be07092012-11-13 12:18:22 -08003883 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09003884 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08003885 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3886
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003887 /* stop the trace while dumping if we are not opening "snapshot" */
3888 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003889 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003890
Steven Rostedtae3b5092013-01-23 15:22:59 -05003891 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003892 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003893 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003894 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003895 }
3896 ring_buffer_read_prepare_sync();
3897 for_each_tracing_cpu(cpu) {
3898 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003899 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003900 }
3901 } else {
3902 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003903 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003904 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003905 ring_buffer_read_prepare_sync();
3906 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003907 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003908 }
3909
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003910 mutex_unlock(&trace_types_lock);
3911
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003912 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003913
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003914 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003915 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003916 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003917 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003918release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02003919 seq_release_private(inode, file);
3920 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003921}
3922
3923int tracing_open_generic(struct inode *inode, struct file *filp)
3924{
Steven Rostedt60a11772008-05-12 21:20:44 +02003925 if (tracing_disabled)
3926 return -ENODEV;
3927
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003928 filp->private_data = inode->i_private;
3929 return 0;
3930}
3931
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003932bool tracing_is_disabled(void)
3933{
3934	return tracing_disabled;
3935}
3936
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003937/*
3938 * Open and update trace_array ref count.
3939 * Must have the current trace_array passed to it.
3940 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003941static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003942{
3943 struct trace_array *tr = inode->i_private;
3944
3945 if (tracing_disabled)
3946 return -ENODEV;
3947
3948 if (trace_array_get(tr) < 0)
3949 return -ENODEV;
3950
3951 filp->private_data = inode->i_private;
3952
3953 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003954}
3955
Hannes Eder4fd27352009-02-10 19:44:12 +01003956static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003957{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003958 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003959 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003960 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003961 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003962
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003963 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003964 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003965 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003966 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003967
Oleg Nesterov6484c712013-07-23 17:26:10 +02003968 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003969 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003970 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003971
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003972 for_each_tracing_cpu(cpu) {
3973 if (iter->buffer_iter[cpu])
3974 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3975 }
3976
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003977 if (iter->trace && iter->trace->close)
3978 iter->trace->close(iter);
3979
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003980 if (!iter->snapshot)
3981 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003982 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003983
3984 __trace_array_put(tr);
3985
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003986 mutex_unlock(&trace_types_lock);
3987
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003988 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003989 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003990 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003991 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003992 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003993
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003994 return 0;
3995}
3996
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003997static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3998{
3999 struct trace_array *tr = inode->i_private;
4000
4001 trace_array_put(tr);
4002 return 0;
4003}
4004
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004005static int tracing_single_release_tr(struct inode *inode, struct file *file)
4006{
4007 struct trace_array *tr = inode->i_private;
4008
4009 trace_array_put(tr);
4010
4011 return single_release(inode, file);
4012}
4013
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004014static int tracing_open(struct inode *inode, struct file *file)
4015{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004016 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004017 struct trace_iterator *iter;
4018 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004019
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004020 if (trace_array_get(tr) < 0)
4021 return -ENODEV;
4022
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004023 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02004024 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4025 int cpu = tracing_get_cpu(inode);
Bo Yan8dd33bc2017-09-18 10:03:35 -07004026 struct trace_buffer *trace_buf = &tr->trace_buffer;
4027
4028#ifdef CONFIG_TRACER_MAX_TRACE
4029 if (tr->current_trace->print_max)
4030 trace_buf = &tr->max_buffer;
4031#endif
Oleg Nesterov6484c712013-07-23 17:26:10 +02004032
4033 if (cpu == RING_BUFFER_ALL_CPUS)
Bo Yan8dd33bc2017-09-18 10:03:35 -07004034 tracing_reset_online_cpus(trace_buf);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004035 else
Bo Yan8dd33bc2017-09-18 10:03:35 -07004036 tracing_reset(trace_buf, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004037 }
4038
4039 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004040 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004041 if (IS_ERR(iter))
4042 ret = PTR_ERR(iter);
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004043 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004044 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4045 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004046
4047 if (ret < 0)
4048 trace_array_put(tr);
4049
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004050 return ret;
4051}
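
/*
 * The O_TRUNC branch above is what makes `echo > trace` clear the
 * buffer.  A hedged userspace equivalent (mount point assumed, error
 * handling elided):
 *
 *	int fd = open("/sys/kernel/tracing/trace", O_WRONLY | O_TRUNC);
 *
 *	if (fd >= 0)
 *		close(fd);	// the open itself reset the buffer(s)
 */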
4052
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004053/*
4054 * Some tracers are not suitable for instance buffers.
4055 * A tracer is always available for the global array (toplevel)
4056 * or if it explicitly states that it is.
4057 */
4058static bool
4059trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4060{
4061 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4062}
4063
4064/* Find the next tracer that this trace array may use */
4065static struct tracer *
4066get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4067{
4068 while (t && !trace_ok_for_array(t, tr))
4069 t = t->next;
4070
4071 return t;
4072}
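
/*
 * Illustrative only: a tracer opts in to instance buffers by setting
 * ->allow_instances before registering.  A hypothetical tracer usable
 * inside instances/foo might be declared as:
 *
 *	static struct tracer example_tracer __tracer_data = {
 *		.name		 = "example",
 *		.init		 = example_init,	// hypothetical callback
 *		.allow_instances = true,
 *	};
 *
 * Without that flag, get_tracer_for_array() filters the tracer out of
 * every trace array except the global one.
 */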
4073
Ingo Molnare309b412008-05-12 21:20:51 +02004074static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004075t_next(struct seq_file *m, void *v, loff_t *pos)
4076{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004077 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004078 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004079
4080 (*pos)++;
4081
4082 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004083 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004084
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004085 return t;
4086}
4087
4088static void *t_start(struct seq_file *m, loff_t *pos)
4089{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004090 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004091 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004092 loff_t l = 0;
4093
4094 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004095
4096 t = get_tracer_for_array(tr, trace_types);
4097 for (; t && l < *pos; t = t_next(m, t, &l))
4098 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004099
4100 return t;
4101}
4102
4103static void t_stop(struct seq_file *m, void *p)
4104{
4105 mutex_unlock(&trace_types_lock);
4106}
4107
4108static int t_show(struct seq_file *m, void *v)
4109{
4110 struct tracer *t = v;
4111
4112 if (!t)
4113 return 0;
4114
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004115 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004116 if (t->next)
4117 seq_putc(m, ' ');
4118 else
4119 seq_putc(m, '\n');
4120
4121 return 0;
4122}
4123
James Morris88e9d342009-09-22 16:43:43 -07004124static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004125 .start = t_start,
4126 .next = t_next,
4127 .stop = t_stop,
4128 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004129};
4130
4131static int show_traces_open(struct inode *inode, struct file *file)
4132{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004133 struct trace_array *tr = inode->i_private;
4134 struct seq_file *m;
4135 int ret;
4136
Steven Rostedt60a11772008-05-12 21:20:44 +02004137 if (tracing_disabled)
4138 return -ENODEV;
4139
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004140 ret = seq_open(file, &show_traces_seq_ops);
4141 if (ret)
4142 return ret;
4143
4144 m = file->private_data;
4145 m->private = tr;
4146
4147 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004148}
4149
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004150static ssize_t
4151tracing_write_stub(struct file *filp, const char __user *ubuf,
4152 size_t count, loff_t *ppos)
4153{
4154 return count;
4155}
4156
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004157loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08004158{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004159 int ret;
4160
Slava Pestov364829b2010-11-24 15:13:16 -08004161 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004162 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08004163 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004164 file->f_pos = ret = 0;
4165
4166 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08004167}
4168
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004169static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004170 .open = tracing_open,
4171 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004172 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004173 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004174 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004175};
4176
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004177static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02004178 .open = show_traces_open,
4179 .read = seq_read,
4180 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004181 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004182};
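
/*
 * These seq ops back the "available_tracers" file mentioned in the
 * readme below.  A small userspace sketch of consuming it (mount point
 * assumed, error handling elided):
 *
 *	char buf[256];
 *	int fd = open("/sys/kernel/tracing/available_tracers", O_RDONLY);
 *	ssize_t n = read(fd, buf, sizeof(buf) - 1);
 *
 *	if (n > 0)
 *		buf[n] = '\0';	// space-separated list, e.g. "function nop"
 */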
4183
4184static ssize_t
4185tracing_cpumask_read(struct file *filp, char __user *ubuf,
4186 size_t count, loff_t *ppos)
4187{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004188 struct trace_array *tr = file_inode(filp)->i_private;
Changbin Du90e406f2017-11-30 11:39:43 +08004189 char *mask_str;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004190 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004191
Changbin Du90e406f2017-11-30 11:39:43 +08004192 len = snprintf(NULL, 0, "%*pb\n",
4193 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4194 mask_str = kmalloc(len, GFP_KERNEL);
4195 if (!mask_str)
4196 return -ENOMEM;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004197
Changbin Du90e406f2017-11-30 11:39:43 +08004198 len = snprintf(mask_str, len, "%*pb\n",
Tejun Heo1a402432015-02-13 14:37:39 -08004199 cpumask_pr_args(tr->tracing_cpumask));
4200 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004201 count = -EINVAL;
4202 goto out_err;
4203 }
Changbin Du90e406f2017-11-30 11:39:43 +08004204 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004205
4206out_err:
Changbin Du90e406f2017-11-30 11:39:43 +08004207 kfree(mask_str);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004208
4209 return count;
4210}
4211
4212static ssize_t
4213tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4214 size_t count, loff_t *ppos)
4215{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004216 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304217 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004218 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304219
4220 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4221 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004222
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304223 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004224 if (err)
4225 goto err_unlock;
4226
Steven Rostedta5e25882008-12-02 15:34:05 -05004227 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004228 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02004229 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004230 /*
4231 * Increase/decrease the disabled counter if we are
4232 * about to flip a bit in the cpumask:
4233 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004234 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304235 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004236 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4237 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004238 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004239 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304240 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004241 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4242 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004243 }
4244 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004245 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05004246 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02004247
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004248 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304249 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004250
Ingo Molnarc7078de2008-05-12 21:20:52 +02004251 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004252
4253err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08004254 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004255
4256 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004257}
4258
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004259static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004260 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004261 .read = tracing_cpumask_read,
4262 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004263 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004264 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004265};
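
/*
 * tracing_cpumask takes a hex CPU mask (parsed by cpumask_parse_user()
 * above) and reads back in the same "%*pb" format.  A hedged sketch
 * that limits tracing to CPUs 0 and 1 (mount point assumed):
 *
 *	int fd = open("/sys/kernel/tracing/tracing_cpumask", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "3", 1);	// bits 0 and 1 -> CPUs 0 and 1
 *		close(fd);
 *	}
 */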
4266
Li Zefanfdb372e2009-12-08 11:15:59 +08004267static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004268{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004269 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004270 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004271 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004272 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004273
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004274 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004275 tracer_flags = tr->current_trace->flags->val;
4276 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004277
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004278 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004279 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08004280 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004281 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004282 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004283 }
4284
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004285 for (i = 0; trace_opts[i].name; i++) {
4286 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08004287 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004288 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004289 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004290 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004291 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004292
Li Zefanfdb372e2009-12-08 11:15:59 +08004293 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004294}
4295
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004296static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08004297 struct tracer_flags *tracer_flags,
4298 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004299{
Chunyu Hud39cdd22016-03-08 21:37:01 +08004300 struct tracer *trace = tracer_flags->trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004301 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004302
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004303 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004304 if (ret)
4305 return ret;
4306
4307 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08004308 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004309 else
Zhaolei77708412009-08-07 18:53:21 +08004310 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004311 return 0;
4312}
4313
Li Zefan8d18eaa2009-12-08 11:17:06 +08004314/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004315static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08004316{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004317 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004318 struct tracer_flags *tracer_flags = trace->flags;
4319 struct tracer_opt *opts = NULL;
4320 int i;
4321
4322 for (i = 0; tracer_flags->opts[i].name; i++) {
4323 opts = &tracer_flags->opts[i];
4324
4325 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004326 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08004327 }
4328
4329 return -EINVAL;
4330}
4331
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004332/* Some tracers require overwrite to stay enabled */
4333int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4334{
4335 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4336 return -1;
4337
4338 return 0;
4339}
4340
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004341int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004342{
4343 /* do nothing if flag is already set */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004344 if (!!(tr->trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004345 return 0;
4346
4347 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004348 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05004349 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004350 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004351
4352 if (enabled)
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004353 tr->trace_flags |= mask;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004354 else
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004355 tr->trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08004356
4357 if (mask == TRACE_ITER_RECORD_CMD)
4358 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08004359
Joel Fernandesd914ba32017-06-26 19:01:55 -07004360 if (mask == TRACE_ITER_RECORD_TGID) {
4361 if (!tgid_map)
4362			tgid_map = kcalloc(PID_MAX_DEFAULT + 1, sizeof(*tgid_map),
4363					   GFP_KERNEL);
4364 if (!tgid_map) {
4365 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4366 return -ENOMEM;
4367 }
4368
4369 trace_event_enable_tgid_record(enabled);
4370 }
4371
Steven Rostedtc37775d2016-04-13 16:59:18 -04004372 if (mask == TRACE_ITER_EVENT_FORK)
4373 trace_event_follow_fork(tr, enabled);
4374
Namhyung Kim1e104862017-04-17 11:44:28 +09004375 if (mask == TRACE_ITER_FUNC_FORK)
4376 ftrace_pid_follow_fork(tr, enabled);
4377
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004378 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004379 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004380#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004381 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004382#endif
4383 }
Steven Rostedt81698832012-10-11 10:15:05 -04004384
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004385 if (mask == TRACE_ITER_PRINTK) {
Steven Rostedt81698832012-10-11 10:15:05 -04004386 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004387 trace_printk_control(enabled);
4388 }
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004389
4390 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004391}
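
/*
 * Note the lazy allocation above: tgid_map only comes into existence
 * the first time someone sets the record-tgid option.  Hedged userspace
 * sketch (mount point assumed):
 *
 *	int fd = open("/sys/kernel/tracing/trace_options", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "record-tgid", 11);	// allocates tgid_map
 *		close(fd);
 *	}
 *
 * Once set, the saved_tgids file (further down) reports pid -> tgid.
 */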
4392
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004393static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004394{
Li Zefan8d18eaa2009-12-08 11:17:06 +08004395 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004396 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004397 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004398 int i;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004399 size_t orig_len = strlen(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004400
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004401 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004402
Li Zefan8d18eaa2009-12-08 11:17:06 +08004403 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004404 neg = 1;
4405 cmp += 2;
4406 }
4407
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004408 mutex_lock(&trace_types_lock);
4409
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004410 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08004411 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004412 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004413 break;
4414 }
4415 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004416
4417 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004418 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004419 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004420
4421 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004422
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004423 /*
4424 * If the first trailing whitespace is replaced with '\0' by strstrip,
4425 * turn it back into a space.
4426 */
4427 if (orig_len > strlen(option))
4428 option[strlen(option)] = ' ';
4429
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004430 return ret;
4431}
4432
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004433static void __init apply_trace_boot_options(void)
4434{
4435 char *buf = trace_boot_options_buf;
4436 char *option;
4437
4438 while (true) {
4439 option = strsep(&buf, ",");
4440
4441 if (!option)
4442 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004443
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05004444 if (*option)
4445 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004446
4447 /* Put back the comma to allow this to be called again */
4448 if (buf)
4449 *(buf - 1) = ',';
4450 }
4451}
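
/*
 * The strings handled by trace_set_options() come either from the
 * trace_options file or from the trace_options= boot parameter split
 * above.  Hedged examples (option names taken from the flag table in
 * trace.h):
 *
 *	// at boot:  trace_options=norecord-cmd,overwrite
 *
 *	int fd = open("/sys/kernel/tracing/trace_options", O_WRONLY);
 *
 *	write(fd, "norecord-cmd", 12);	// clear TRACE_ITER_RECORD_CMD
 *	write(fd, "overwrite", 9);	// set TRACE_ITER_OVERWRITE
 */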
4452
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004453static ssize_t
4454tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4455 size_t cnt, loff_t *ppos)
4456{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004457 struct seq_file *m = filp->private_data;
4458 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004459 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004460 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004461
4462 if (cnt >= sizeof(buf))
4463 return -EINVAL;
4464
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08004465 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004466 return -EFAULT;
4467
Steven Rostedta8dd2172013-01-09 20:54:17 -05004468 buf[cnt] = 0;
4469
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004470 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004471 if (ret < 0)
4472 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004473
Jiri Olsacf8517c2009-10-23 19:36:16 -04004474 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004475
4476 return cnt;
4477}
4478
Li Zefanfdb372e2009-12-08 11:15:59 +08004479static int tracing_trace_options_open(struct inode *inode, struct file *file)
4480{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004481 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004482 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004483
Li Zefanfdb372e2009-12-08 11:15:59 +08004484 if (tracing_disabled)
4485 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004486
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004487 if (trace_array_get(tr) < 0)
4488 return -ENODEV;
4489
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004490 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4491 if (ret < 0)
4492 trace_array_put(tr);
4493
4494 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08004495}
4496
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004497static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08004498 .open = tracing_trace_options_open,
4499 .read = seq_read,
4500 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004501 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05004502 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004503};
4504
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004505static const char readme_msg[] =
4506 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004507 "# echo 0 > tracing_on : quick way to disable tracing\n"
4508 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4509 " Important files:\n"
4510 " trace\t\t\t- The static contents of the buffer\n"
4511 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4512 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4513 " current_tracer\t- function and latency tracers\n"
4514 " available_tracers\t- list of configured tracers for current_tracer\n"
4515 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4516 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4517 " trace_clock\t\t-change the clock used to order events\n"
4518 " local: Per cpu clock but may not be synced across CPUs\n"
4519 " global: Synced across CPUs but slows tracing down.\n"
4520 " counter: Not a clock, but just an increment\n"
4521 " uptime: Jiffy counter from time of boot\n"
4522 " perf: Same clock that perf events use\n"
4523#ifdef CONFIG_X86_64
4524 " x86-tsc: TSC cycle counter\n"
4525#endif
Tom Zanussi2c1ea602018-01-15 20:51:41 -06004526	"\n  timestamp_mode\t- view the mode used to timestamp events\n"
4527 " delta: Delta difference against a buffer-wide timestamp\n"
4528 " absolute: Absolute (standalone) timestamp\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004529	"\n  trace_marker\t\t- Writes into this file are inserted into the kernel buffer\n"
Steven Rostedtfa32e852016-07-06 15:25:08 -04004530	"\n  trace_marker_raw\t\t- Writes into this file are inserted as binary data into the kernel buffer\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004531 " tracing_cpumask\t- Limit which CPUs to trace\n"
4532 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4533 "\t\t\t Remove sub-buffer with rmdir\n"
4534 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004535	"\t\t\t  Disable an option by adding the prefix 'no' to the\n"
4536 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004537 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004538#ifdef CONFIG_DYNAMIC_FTRACE
4539 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004540 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4541 "\t\t\t functions\n"
Masami Hiramatsu60f1d5e2016-10-05 20:58:15 +09004542 "\t accepts: func_full_name or glob-matching-pattern\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004543 "\t modules: Can select a group via module\n"
4544 "\t Format: :mod:<module-name>\n"
4545 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4546 "\t triggers: a command to perform when function is hit\n"
4547 "\t Format: <function>:<trigger>[:count]\n"
4548 "\t trigger: traceon, traceoff\n"
4549 "\t\t enable_event:<system>:<event>\n"
4550 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004551#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004552 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004553#endif
4554#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004555 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004556#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04004557 "\t\t dump\n"
4558 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004559 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4560 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4561 "\t The first one will disable tracing every time do_fault is hit\n"
4562 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4563 "\t The first time do trap is hit and it disables tracing, the\n"
4564 "\t counter will decrement to 2. If tracing is already disabled,\n"
4565 "\t the counter will not decrement. It only decrements when the\n"
4566 "\t trigger did work\n"
4567 "\t To remove trigger without count:\n"
4568 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4569 "\t To remove trigger with a count:\n"
4570 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004571 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004572 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4573 "\t modules: Can select a group via module command :mod:\n"
4574 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004575#endif /* CONFIG_DYNAMIC_FTRACE */
4576#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004577 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4578 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004579#endif
4580#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4581 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09004582 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004583 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4584#endif
4585#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004586 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4587 "\t\t\t snapshot buffer. Read the contents for more\n"
4588 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004589#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004590#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004591 " stack_trace\t\t- Shows the max stack trace when active\n"
4592 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004593 "\t\t\t Write into this file to reset the max size (trigger a\n"
4594 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004595#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004596 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4597 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004598#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004599#endif /* CONFIG_STACK_TRACER */
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004600#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsu86425622016-08-18 17:58:15 +09004601 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4602 "\t\t\t Write into this file to define/undefine new trace events.\n"
4603#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004604#ifdef CONFIG_UPROBE_EVENTS
Masami Hiramatsu86425622016-08-18 17:58:15 +09004605 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4606 "\t\t\t Write into this file to define/undefine new trace events.\n"
4607#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004608#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
Masami Hiramatsu86425622016-08-18 17:58:15 +09004609 "\t accepts: event-definitions (one definition per line)\n"
Masami Hiramatsuc3ca46e2017-05-23 15:05:50 +09004610 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4611 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09004612 "\t -:[<group>/]<event>\n"
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004613#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsu86425622016-08-18 17:58:15 +09004614 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
Naveen N. Rao35b6f552017-02-22 19:23:39 +05304615	"\t           place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09004616#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004617#ifdef CONFIG_UPROBE_EVENTS
Masami Hiramatsu86425622016-08-18 17:58:15 +09004618 "\t place: <path>:<offset>\n"
4619#endif
4620 "\t args: <name>=fetcharg[:type]\n"
4621 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4622 "\t $stack<index>, $stack, $retval, $comm\n"
4623 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4624 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4625#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06004626 " events/\t\t- Directory containing all trace event subsystems:\n"
4627 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4628 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004629 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4630 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004631 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004632 " events/<system>/<event>/\t- Directory containing control files for\n"
4633 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004634 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4635 " filter\t\t- If set, only events passing filter are traced\n"
4636 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004637 "\t Format: <trigger>[:count][if <filter>]\n"
4638 "\t trigger: traceon, traceoff\n"
4639 "\t enable_event:<system>:<event>\n"
4640 "\t disable_event:<system>:<event>\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06004641#ifdef CONFIG_HIST_TRIGGERS
4642 "\t enable_hist:<system>:<event>\n"
4643 "\t disable_hist:<system>:<event>\n"
4644#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06004645#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004646 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004647#endif
4648#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004649 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004650#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004651#ifdef CONFIG_HIST_TRIGGERS
4652 "\t\t hist (see below)\n"
4653#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004654 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4655 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4656 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4657 "\t events/block/block_unplug/trigger\n"
4658 "\t The first disables tracing every time block_unplug is hit.\n"
4659 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4660 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4661 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4662 "\t Like function triggers, the counter is only decremented if it\n"
4663 "\t enabled or disabled tracing.\n"
4664 "\t To remove a trigger without a count:\n"
4665 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4666 "\t To remove a trigger with a count:\n"
4667 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4668 "\t Filters can be ignored when removing a trigger.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004669#ifdef CONFIG_HIST_TRIGGERS
4670 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
Tom Zanussi76a3b0c2016-03-03 12:54:44 -06004671 "\t Format: hist:keys=<field1[,field2,...]>\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004672 "\t [:values=<field1[,field2,...]>]\n"
Tom Zanussie62347d2016-03-03 12:54:45 -06004673 "\t [:sort=<field1[,field2,...]>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004674 "\t [:size=#entries]\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06004675 "\t [:pause][:continue][:clear]\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004676 "\t [:name=histname1]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004677 "\t [if <filter>]\n\n"
4678 "\t When a matching event is hit, an entry is added to a hash\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004679 "\t table using the key(s) and value(s) named, and the value of a\n"
4680 "\t sum called 'hitcount' is incremented. Keys and values\n"
4681 "\t correspond to fields in the event's format description. Keys\n"
Tom Zanussi69a02002016-03-03 12:54:52 -06004682 "\t can be any field, or the special string 'stacktrace'.\n"
4683 "\t Compound keys consisting of up to two fields can be specified\n"
4684 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4685 "\t fields. Sort keys consisting of up to two fields can be\n"
4686 "\t specified using the 'sort' keyword. The sort direction can\n"
4687 "\t be modified by appending '.descending' or '.ascending' to a\n"
4688 "\t sort field. The 'size' parameter can be used to specify more\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004689 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4690 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4691 "\t its histogram data will be shared with other triggers of the\n"
4692 "\t same name, and trigger hits will update this common data.\n\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004693 "\t Reading the 'hist' file for the event will dump the hash\n"
Tom Zanussi52a7f162016-03-03 12:54:57 -06004694 "\t table in its entirety to stdout. If there are multiple hist\n"
4695 "\t triggers attached to an event, there will be a table for each\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004696 "\t trigger in the output. The table displayed for a named\n"
4697 "\t trigger will be the same as any other instance having the\n"
4698 "\t same name. The default format used to display a given field\n"
4699 "\t can be modified by appending any of the following modifiers\n"
4700 "\t to the field name, as applicable:\n\n"
Tom Zanussic6afad42016-03-03 12:54:49 -06004701 "\t .hex display a number as a hex value\n"
4702 "\t .sym display an address as a symbol\n"
Tom Zanussi6b4827a2016-03-03 12:54:50 -06004703 "\t .sym-offset display an address as a symbol and offset\n"
Tom Zanussi31696192016-03-03 12:54:51 -06004704 "\t .execname display a common_pid as a program name\n"
Tom Zanussi860f9f62018-01-15 20:51:48 -06004705 "\t .syscall display a syscall id as a syscall name\n"
4706 "\t .log2 display log2 value rather than raw number\n"
4707 "\t .usecs display a common_timestamp in microseconds\n\n"
Tom Zanussi83e99912016-03-03 12:54:46 -06004708 "\t The 'pause' parameter can be used to pause an existing hist\n"
4709 "\t trigger or to start a hist trigger but not log any events\n"
4710 "\t until told to do so. 'continue' can be used to start or\n"
4711 "\t restart a paused hist trigger.\n\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06004712 "\t The 'clear' parameter will clear the contents of a running\n"
4713 "\t hist trigger and leave its current paused/active state\n"
4714 "\t unchanged.\n\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06004715 "\t The enable_hist and disable_hist triggers can be used to\n"
4716 "\t have one event conditionally start and stop another event's\n"
4717 "\t already-attached hist trigger. The syntax is analagous to\n"
4718 "\t the enable_event and disable_event triggers.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004719#endif
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004720;
4721
4722static ssize_t
4723tracing_readme_read(struct file *filp, char __user *ubuf,
4724 size_t cnt, loff_t *ppos)
4725{
4726 return simple_read_from_buffer(ubuf, cnt, ppos,
4727 readme_msg, strlen(readme_msg));
4728}
4729
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004730static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02004731 .open = tracing_open_generic,
4732 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004733 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004734};
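
/*
 * A hedged, concrete instance of the kprobe_events grammar documented
 * in readme_msg above: define a probe (symbol and names are
 * illustrative), then remove it.  One definition per write(); O_APPEND
 * keeps existing probes.
 *
 *	int fd = open("/sys/kernel/tracing/kprobe_events",
 *		      O_WRONLY | O_APPEND);
 *
 *	write(fd, "p:mygrp/myopen do_sys_open $comm", 32);  // define
 *	write(fd, "-:mygrp/myopen", 14);                    // undefine
 *
 *	// enable it in between via events/mygrp/myopen/enable
 */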
4735
Michael Sartain99c621d2017-07-05 22:07:15 -06004736static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4737{
4738 int *ptr = v;
4739
4740 if (*pos || m->count)
4741 ptr++;
4742
4743 (*pos)++;
4744
4745 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4746 if (trace_find_tgid(*ptr))
4747 return ptr;
4748 }
4749
4750 return NULL;
4751}
4752
4753static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
4754{
4755 void *v;
4756 loff_t l = 0;
4757
4758 if (!tgid_map)
4759 return NULL;
4760
4761 v = &tgid_map[0];
4762 while (l <= *pos) {
4763 v = saved_tgids_next(m, v, &l);
4764 if (!v)
4765 return NULL;
4766 }
4767
4768 return v;
4769}
4770
4771static void saved_tgids_stop(struct seq_file *m, void *v)
4772{
4773}
4774
4775static int saved_tgids_show(struct seq_file *m, void *v)
4776{
4777 int pid = (int *)v - tgid_map;
4778
4779 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
4780 return 0;
4781}
4782
4783static const struct seq_operations tracing_saved_tgids_seq_ops = {
4784 .start = saved_tgids_start,
4785 .stop = saved_tgids_stop,
4786 .next = saved_tgids_next,
4787 .show = saved_tgids_show,
4788};
4789
4790static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
4791{
4792 if (tracing_disabled)
4793 return -ENODEV;
4794
4795 return seq_open(filp, &tracing_saved_tgids_seq_ops);
4796}
4797
4798
4799static const struct file_operations tracing_saved_tgids_fops = {
4800 .open = tracing_saved_tgids_open,
4801 .read = seq_read,
4802 .llseek = seq_lseek,
4803 .release = seq_release,
4804};
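
/*
 * Userspace sketch (hedged): each saved_tgids line is "<pid> <tgid>",
 * as emitted by saved_tgids_show() above.
 *
 *	FILE *f = fopen("/sys/kernel/tracing/saved_tgids", "r");
 *	int pid, tgid;
 *
 *	while (f && fscanf(f, "%d %d", &pid, &tgid) == 2)
 *		printf("thread %d belongs to process %d\n", pid, tgid);
 */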
4805
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004806static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04004807{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004808 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004809
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004810 if (*pos || m->count)
4811 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004812
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004813 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004814
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004815 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4816 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004817 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04004818 continue;
4819
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004820 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004821 }
4822
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004823 return NULL;
4824}
Avadh Patel69abe6a2009-04-10 16:04:48 -04004825
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004826static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4827{
4828 void *v;
4829 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004830
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004831 preempt_disable();
4832 arch_spin_lock(&trace_cmdline_lock);
4833
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004834 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004835 while (l <= *pos) {
4836 v = saved_cmdlines_next(m, v, &l);
4837 if (!v)
4838 return NULL;
4839 }
4840
4841 return v;
4842}
4843
4844static void saved_cmdlines_stop(struct seq_file *m, void *v)
4845{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004846 arch_spin_unlock(&trace_cmdline_lock);
4847 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004848}
4849
4850static int saved_cmdlines_show(struct seq_file *m, void *v)
4851{
4852 char buf[TASK_COMM_LEN];
4853 unsigned int *pid = v;
4854
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004855 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004856 seq_printf(m, "%d %s\n", *pid, buf);
4857 return 0;
4858}
4859
4860static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4861 .start = saved_cmdlines_start,
4862 .next = saved_cmdlines_next,
4863 .stop = saved_cmdlines_stop,
4864 .show = saved_cmdlines_show,
4865};
4866
4867static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4868{
4869 if (tracing_disabled)
4870 return -ENODEV;
4871
4872 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04004873}
4874
4875static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004876 .open = tracing_saved_cmdlines_open,
4877 .read = seq_read,
4878 .llseek = seq_lseek,
4879 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04004880};
4881
4882static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004883tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4884 size_t cnt, loff_t *ppos)
4885{
4886 char buf[64];
4887 int r;
4888
4889 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09004890 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004891 arch_spin_unlock(&trace_cmdline_lock);
4892
4893 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4894}
4895
4896static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4897{
4898 kfree(s->saved_cmdlines);
4899 kfree(s->map_cmdline_to_pid);
4900 kfree(s);
4901}
4902
4903static int tracing_resize_saved_cmdlines(unsigned int val)
4904{
4905 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4906
Namhyung Kima6af8fb2014-06-10 16:11:35 +09004907 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004908 if (!s)
4909 return -ENOMEM;
4910
4911 if (allocate_cmdlines_buffer(val, s) < 0) {
4912 kfree(s);
4913 return -ENOMEM;
4914 }
4915
4916 arch_spin_lock(&trace_cmdline_lock);
4917 savedcmd_temp = savedcmd;
4918 savedcmd = s;
4919 arch_spin_unlock(&trace_cmdline_lock);
4920 free_saved_cmdlines_buffer(savedcmd_temp);
4921
4922 return 0;
4923}
4924
4925static ssize_t
4926tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4927 size_t cnt, loff_t *ppos)
4928{
4929 unsigned long val;
4930 int ret;
4931
4932 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4933 if (ret)
4934 return ret;
4935
4936	/* must have at least 1 entry and at most PID_MAX_DEFAULT */
4937 if (!val || val > PID_MAX_DEFAULT)
4938 return -EINVAL;
4939
4940 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4941 if (ret < 0)
4942 return ret;
4943
4944 *ppos += cnt;
4945
4946 return cnt;
4947}
4948
4949static const struct file_operations tracing_saved_cmdlines_size_fops = {
4950 .open = tracing_open_generic,
4951 .read = tracing_saved_cmdlines_size_read,
4952 .write = tracing_saved_cmdlines_size_write,
4953};
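/*
 * Illustrative note (not from the original source): the swap in
 * tracing_resize_saved_cmdlines() publishes the new buffer under
 * trace_cmdline_lock and frees the old one only after the unlock, so
 * lookups never see a half-initialized buffer. From userspace the
 * resize is driven through the "saved_cmdlines_size" file, e.g.:
 *
 *   # echo 1024 > /sys/kernel/tracing/saved_cmdlines_size
 *   # cat /sys/kernel/tracing/saved_cmdlines_size
 *   1024
 */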

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static union trace_eval_map_item *
update_eval_map(union trace_eval_map_item *ptr)
{
	if (!ptr->map.eval_string) {
		if (ptr->tail.next) {
			ptr = ptr->tail.next;
			/* Set ptr to the next real item (skip head) */
			ptr++;
		} else
			return NULL;
	}
	return ptr;
}

static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
{
	union trace_eval_map_item *ptr = v;

	/*
	 * Paranoid! If ptr points to end, we don't want to increment past it.
	 * This really should never happen.
	 */
	ptr = update_eval_map(ptr);
	if (WARN_ON_ONCE(!ptr))
		return NULL;

	ptr++;

	(*pos)++;

	ptr = update_eval_map(ptr);

	return ptr;
}

static void *eval_map_start(struct seq_file *m, loff_t *pos)
{
	union trace_eval_map_item *v;
	loff_t l = 0;

	mutex_lock(&trace_eval_mutex);

	v = trace_eval_maps;
	if (v)
		v++;

	while (v && l < *pos) {
		v = eval_map_next(m, v, &l);
	}

	return v;
}

static void eval_map_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&trace_eval_mutex);
}

static int eval_map_show(struct seq_file *m, void *v)
{
	union trace_eval_map_item *ptr = v;

	seq_printf(m, "%s %ld (%s)\n",
		   ptr->map.eval_string, ptr->map.eval_value,
		   ptr->map.system);

	return 0;
}

static const struct seq_operations tracing_eval_map_seq_ops = {
	.start		= eval_map_start,
	.next		= eval_map_next,
	.stop		= eval_map_stop,
	.show		= eval_map_show,
};

static int tracing_eval_map_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_eval_map_seq_ops);
}

static const struct file_operations tracing_eval_map_fops = {
	.open		= tracing_eval_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
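/*
 * Illustrative note (not from the original source): given the
 * seq_printf() format in eval_map_show(), reading the "eval_map" file
 * yields one "NAME VALUE (system)" line per map entry, e.g.:
 *
 *   # cat /sys/kernel/tracing/eval_map
 *   HI_SOFTIRQ 0 (irq)
 *   TIMER_SOFTIRQ 1 (irq)
 *
 * The entries shown are hypothetical; the real content depends on the
 * enum/sizeof maps the kernel and loaded modules actually export.
 */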

static inline union trace_eval_map_item *
trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
{
	/* Return tail of array given the head */
	return ptr + ptr->head.length + 1;
}

static void
trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
			   int len)
{
	struct trace_eval_map **stop;
	struct trace_eval_map **map;
	union trace_eval_map_item *map_array;
	union trace_eval_map_item *ptr;

	stop = start + len;

	/*
	 * The trace_eval_maps contains the map plus a head and tail item,
	 * where the head holds the module and length of array, and the
	 * tail holds a pointer to the next list.
	 */
	map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
	if (!map_array) {
		pr_warn("Unable to allocate trace eval mapping\n");
		return;
	}

	mutex_lock(&trace_eval_mutex);

	if (!trace_eval_maps)
		trace_eval_maps = map_array;
	else {
		ptr = trace_eval_maps;
		for (;;) {
			ptr = trace_eval_jmp_to_tail(ptr);
			if (!ptr->tail.next)
				break;
			ptr = ptr->tail.next;
		}
		ptr->tail.next = map_array;
	}
	map_array->head.mod = mod;
	map_array->head.length = len;
	map_array++;

	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
		map_array->map = **map;
		map_array++;
	}
	memset(map_array, 0, sizeof(*map_array));

	mutex_unlock(&trace_eval_mutex);
}
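/*
 * Illustrative layout sketch (not from the original source), expanding
 * on the comment inside trace_insert_eval_map_file(): for a module that
 * exports len == 3 maps, the allocated array looks like
 *
 *   map_array: [ head | map 0 | map 1 | map 2 | tail ]
 *                 |                               |
 *                 +-- head.mod, head.length = 3   +-- tail.next ->
 *                                                     next module's array
 *
 * which is why trace_eval_jmp_to_tail() computes ptr + length + 1, and
 * why eval_map_start()/update_eval_map() step one slot past each head.
 */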

static void trace_create_eval_file(struct dentry *d_tracer)
{
	trace_create_file("eval_map", 0444, d_tracer,
			  NULL, &tracing_eval_map_fops);
}

#else /* CONFIG_TRACE_EVAL_MAP_FILE */
static inline void trace_create_eval_file(struct dentry *d_tracer) { }
static inline void trace_insert_eval_map_file(struct module *mod,
			      struct trace_eval_map **start, int len) { }
#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */

static void trace_insert_eval_map(struct module *mod,
				  struct trace_eval_map **start, int len)
{
	struct trace_eval_map **map;

	if (len <= 0)
		return;

	map = start;

	trace_event_eval_update(map, len);

	trace_insert_eval_map_file(mod, start, len);
}

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's buffer to the size of @size_buf's entries */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with a differently sized
			 * max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the size of the main buffer, but failed
			 * to update the size of the max buffer. But when
			 * we tried to reset the main buffer to the original
			 * size, we failed there too. This is very unlikely
			 * to happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}

static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is never used on a system that has it
 * configured in, the ring buffers are set to a minimum size. Once a
 * user starts to use the tracing facility, the buffers need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}
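/*
 * Illustrative note (not from the original source): this is the lazy
 * expansion described above. On boot the per-cpu buffers stay at their
 * minimum size, and the first real use of tracing, e.g.
 *
 *   # echo function > /sys/kernel/tracing/current_tracer
 *
 * reaches __tracing_resize_ring_buffer() while ring_buffer_expanded is
 * still false, growing the buffers to trace_buf_size.
 */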

struct trace_option_dentry;

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	create_trace_option_files(tr, t);
}

static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	/* Some tracers won't work if set from the kernel command line */
	if (system_state < SYSTEM_RUNNING && t->noboot) {
		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
			t->name);
		goto out;
	}

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * update_max_tr() is called with interrupts disabled,
		 * so a synchronize_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}
#endif

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip trailing whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}
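/*
 * Illustrative note (not from the original source): the write handler
 * above services writes to the "current_tracer" file, so a typical
 * interaction looks like:
 *
 *   # echo nop > /sys/kernel/tracing/current_tracer
 *   # cat /sys/kernel/tracing/current_tracer
 *   nop
 *
 * The trailing newline from echo is removed by the whitespace-stripping
 * loop before tracing_set_tracer() compares tracer names.
 */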

static ssize_t
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}
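/*
 * Illustrative note (not from the original source): these helpers store
 * nanoseconds internally but expose microseconds. Writing "50" through
 * tracing_nsecs_write() stores 50000 in *ptr, and tracing_nsecs_read()
 * converts back with nsecs_to_usecs(), so userspace reads "50" again.
 */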
5493
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005494static ssize_t
5495tracing_thresh_read(struct file *filp, char __user *ubuf,
5496 size_t cnt, loff_t *ppos)
5497{
5498 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5499}
5500
5501static ssize_t
5502tracing_thresh_write(struct file *filp, const char __user *ubuf,
5503 size_t cnt, loff_t *ppos)
5504{
5505 struct trace_array *tr = filp->private_data;
5506 int ret;
5507
5508 mutex_lock(&trace_types_lock);
5509 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5510 if (ret < 0)
5511 goto out;
5512
5513 if (tr->current_trace->update_thresh) {
5514 ret = tr->current_trace->update_thresh(tr);
5515 if (ret < 0)
5516 goto out;
5517 }
5518
5519 ret = cnt;
5520out:
5521 mutex_unlock(&trace_types_lock);
5522
5523 return ret;
5524}
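/*
 * Illustrative note (not from the original source): tracing_thresh is
 * set in microseconds through the "tracing_thresh" file, e.g.:
 *
 *   # echo 100 > /sys/kernel/tracing/tracing_thresh
 *
 * which asks latency tracers to record only traces longer than 100
 * usecs; a value of 0 leaves the threshold inactive. Tracers that cache
 * the value are refreshed through the update_thresh() callback above.
 */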

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}

#endif

static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	trace_seq_init(&iter->seq);
	iter->trace = tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->trace_buffer = &tr->trace_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);

	tr->current_trace->ref++;
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	/* iter->trace points at the live tracer, not a copy; only free iter */
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	tr->current_trace->ref--;

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}

static __poll_t
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	struct trace_array *tr = iter->tr;

	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return EPOLLIN | EPOLLRDNORM;

	if (tr->trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return EPOLLIN | EPOLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static __poll_t
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}

/* Must be called with iter->mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		/*
		 * We block until we read something or until tracing is
		 * disabled. We still block if tracing is disabled but we
		 * have never read anything. This allows a user to cat
		 * this file, and then enable tracing. But after we have
		 * read something, we give an EOF when tracing is disabled
		 * again.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, false);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;
	}

	return 1;
}
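/*
 * Illustrative note (not from the original source): the loop above is
 * why "trace_pipe" readers block. A plain
 *
 *   # cat /sys/kernel/tracing/trace_pipe
 *
 * sleeps in wait_on_pipe() until events arrive, while a reader that
 * opened the file with O_NONBLOCK gets -EAGAIN immediately when the
 * buffer is empty.
 */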

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	ssize_t sret;

	/*
	 * Avoid more than one consumer on a single file descriptor
	 * This is just a matter of traces coherency, the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		goto out;

	trace_seq_init(&iter->seq);

	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.seq.len = save_len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}
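/*
 * Illustrative note (not from the original source): unlike the "trace"
 * file, this consumer read removes what it returns, via trace_consume()
 * in the loop above. Two processes reading trace_pipe therefore split
 * the event stream between them rather than each seeing a full copy;
 * the iter->mutex only serializes consumers on one file descriptor, it
 * does not duplicate the data.
 */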

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= generic_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};

static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter))	{
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	if (i)
		ret = splice_to_pipe(pipe, &spd);
	else
		ret = 0;
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}

static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
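/*
 * Illustrative note (not from the original source): these handlers back
 * the "buffer_size_kb" file (and its per_cpu variants), so the formats
 * above show up as, e.g.:
 *
 *   # cat /sys/kernel/tracing/buffer_size_kb
 *   1408
 *   # echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *
 * "X" is printed when per-cpu sizes diverge, and "(expanded: ...)" when
 * the ring buffer still sits at its boot-time minimum size. The sizes
 * shown here are examples only.
 */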

static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written, this function
	 * is just to make sure that there is no error when "echo" is used
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing ? */
	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}
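/*
 * Illustrative note (not from the original source): "free_buffer" does
 * its real work on release, so the usual pattern is simply
 *
 *   # echo > /sys/kernel/tracing/free_buffer
 *
 * where closing the fd shrinks the ring buffer to zero and, when the
 * TRACE_ITER_STOP_ON_FREE option (exposed as "disable_on_free") is set,
 * turns tracing off first.
 */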
6069
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006070static ssize_t
6071tracing_mark_write(struct file *filp, const char __user *ubuf,
6072 size_t cnt, loff_t *fpos)
6073{
Alexander Z Lam2d716192013-07-01 15:31:24 -07006074 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04006075 struct ring_buffer_event *event;
6076 struct ring_buffer *buffer;
6077 struct print_entry *entry;
6078 unsigned long irq_flags;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006079 const char faulted[] = "<faulted>";
Steven Rostedtd696b582011-09-22 11:50:27 -04006080 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006081 int size;
6082 int len;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006083
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006084/* Used in tracing_mark_raw_write() as well */
6085#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006086
Steven Rostedtc76f0692008-11-07 22:36:02 -05006087 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006088 return -EINVAL;
6089
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006090 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07006091 return -EINVAL;
6092
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006093 if (cnt > TRACE_BUF_SIZE)
6094 cnt = TRACE_BUF_SIZE;
6095
Steven Rostedtd696b582011-09-22 11:50:27 -04006096 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006097
Steven Rostedtd696b582011-09-22 11:50:27 -04006098 local_save_flags(irq_flags);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006099 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6100
6101 /* If less than "<faulted>", then make sure we can still add that */
6102 if (cnt < FAULTED_SIZE)
6103 size += FAULTED_SIZE - cnt;
6104
Alexander Z Lam2d716192013-07-01 15:31:24 -07006105 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006106 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6107 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006108 if (unlikely(!event))
Steven Rostedtd696b582011-09-22 11:50:27 -04006109 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006110 return -EBADF;
Steven Rostedtd696b582011-09-22 11:50:27 -04006111
6112 entry = ring_buffer_event_data(event);
6113 entry->ip = _THIS_IP_;
6114
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006115 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6116 if (len) {
6117 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6118 cnt = FAULTED_SIZE;
6119 written = -EFAULT;
Steven Rostedtd696b582011-09-22 11:50:27 -04006120 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006121 written = cnt;
6122 len = cnt;
Steven Rostedtd696b582011-09-22 11:50:27 -04006123
6124 if (entry->buf[cnt - 1] != '\n') {
6125 entry->buf[cnt] = '\n';
6126 entry->buf[cnt + 1] = '\0';
6127 } else
6128 entry->buf[cnt] = '\0';
6129
Steven Rostedt7ffbd482012-10-11 12:14:25 -04006130 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04006131
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006132 if (written > 0)
6133 *fpos += written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006134
Steven Rostedtfa32e852016-07-06 15:25:08 -04006135 return written;
6136}
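/*
 * Illustrative usage sketch (path assumes tracefs is mounted at
 * /sys/kernel/tracing): this handler backs the "trace_marker" file,
 * so a userspace annotation such as
 *
 *	echo "hit the slow path" > /sys/kernel/tracing/trace_marker
 *
 * becomes a TRACE_PRINT entry in the ring buffer. If the user page
 * faults during the atomic copy, the literal string "<faulted>" is
 * recorded instead and the write returns -EFAULT.
 */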
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006137
Steven Rostedtfa32e852016-07-06 15:25:08 -04006138/* Limit it for now to 3K (including tag) */
6139#define RAW_DATA_MAX_SIZE (1024*3)
6140
6141static ssize_t
6142tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6143 size_t cnt, loff_t *fpos)
6144{
6145 struct trace_array *tr = filp->private_data;
6146 struct ring_buffer_event *event;
6147 struct ring_buffer *buffer;
6148 struct raw_data_entry *entry;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006149 const char faulted[] = "<faulted>";
Steven Rostedtfa32e852016-07-06 15:25:08 -04006150 unsigned long irq_flags;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006151 ssize_t written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006152 int size;
6153 int len;
6154
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006155#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6156
Steven Rostedtfa32e852016-07-06 15:25:08 -04006157 if (tracing_disabled)
6158 return -EINVAL;
6159
6160 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6161 return -EINVAL;
6162
6163 /* The marker must at least have a tag id */
6164 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6165 return -EINVAL;
6166
6167 if (cnt > TRACE_BUF_SIZE)
6168 cnt = TRACE_BUF_SIZE;
6169
6170 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6171
Steven Rostedtfa32e852016-07-06 15:25:08 -04006172 local_save_flags(irq_flags);
6173 size = sizeof(*entry) + cnt;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006174 if (cnt < FAULT_SIZE_ID)
6175 size += FAULT_SIZE_ID - cnt;
6176
Steven Rostedtfa32e852016-07-06 15:25:08 -04006177 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006178 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6179 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006180 if (!event)
Steven Rostedtfa32e852016-07-06 15:25:08 -04006181 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006182 return -EBADF;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006183
6184 entry = ring_buffer_event_data(event);
6185
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006186 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6187 if (len) {
6188 entry->id = -1;
6189 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6190 written = -EFAULT;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006191 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006192 written = cnt;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006193
6194 __buffer_unlock_commit(buffer, event);
6195
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006196 if (written > 0)
6197 *fpos += written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006198
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02006199 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006200}
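/*
 * Illustrative userspace sketch, assuming this handler is wired up as
 * the "trace_marker_raw" file: a write is a 4-byte tag id followed by
 * opaque binary payload, intended for tools that read trace_pipe_raw:
 *
 *	struct {
 *		unsigned int id;	// tag id, must be present
 *		char data[8];		// opaque payload
 *	} raw = { .id = 42, .data = "example" };
 *	int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *	write(fd, &raw, sizeof(raw));
 */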
6201
Li Zefan13f16d22009-12-08 11:16:11 +08006202static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08006203{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006204 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08006205 int i;
6206
6207 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08006208 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08006209 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006210 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6211 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08006212 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08006213
Li Zefan13f16d22009-12-08 11:16:11 +08006214 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08006215}
6216
Tom Zanussid71bd342018-01-15 20:52:07 -06006217int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08006218{
Zhaolei5079f322009-08-25 16:12:56 +08006219 int i;
6220
Zhaolei5079f322009-08-25 16:12:56 +08006221 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6222 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6223 break;
6224 }
6225 if (i == ARRAY_SIZE(trace_clocks))
6226 return -EINVAL;
6227
Zhaolei5079f322009-08-25 16:12:56 +08006228 mutex_lock(&trace_types_lock);
6229
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006230 tr->clock_id = i;
6231
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006232 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08006233
David Sharp60303ed2012-10-11 16:27:52 -07006234 /*
6235 * The new clock may not be consistent with the previous clock.
6236 * Reset the buffer so that it doesn't have incomparable timestamps.
6237 */
Alexander Z Lam94571582013-08-02 18:36:16 -07006238 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006239
6240#ifdef CONFIG_TRACER_MAX_TRACE
Baohong Liu170b3b12017-09-05 16:57:19 -05006241 if (tr->max_buffer.buffer)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006242 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07006243 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006244#endif
David Sharp60303ed2012-10-11 16:27:52 -07006245
Zhaolei5079f322009-08-25 16:12:56 +08006246 mutex_unlock(&trace_types_lock);
6247
Steven Rostedte1e232c2014-02-10 23:38:46 -05006248 return 0;
6249}
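/*
 * Illustrative usage sketch for the "trace_clock" file that
 * tracing_set_clock() serves: reading lists the available clocks with
 * the active one bracketed, and writing a name switches clocks (and,
 * per the reset above, discards existing buffer contents):
 *
 *	cat /sys/kernel/tracing/trace_clock
 *	[local] global counter uptime perf mono mono_raw boot x86-tsc
 *	echo mono > /sys/kernel/tracing/trace_clock
 *
 * (The clock list shown is illustrative and arch-dependent.)
 */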
6250
6251static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6252 size_t cnt, loff_t *fpos)
6253{
6254 struct seq_file *m = filp->private_data;
6255 struct trace_array *tr = m->private;
6256 char buf[64];
6257 const char *clockstr;
6258 int ret;
6259
6260 if (cnt >= sizeof(buf))
6261 return -EINVAL;
6262
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08006263 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05006264 return -EFAULT;
6265
6266 buf[cnt] = 0;
6267
6268 clockstr = strstrip(buf);
6269
6270 ret = tracing_set_clock(tr, clockstr);
6271 if (ret)
6272 return ret;
6273
Zhaolei5079f322009-08-25 16:12:56 +08006274 *fpos += cnt;
6275
6276 return cnt;
6277}
6278
Li Zefan13f16d22009-12-08 11:16:11 +08006279static int tracing_clock_open(struct inode *inode, struct file *file)
6280{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006281 struct trace_array *tr = inode->i_private;
6282 int ret;
6283
Li Zefan13f16d22009-12-08 11:16:11 +08006284 if (tracing_disabled)
6285 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006286
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006287 if (trace_array_get(tr))
6288 return -ENODEV;
6289
6290 ret = single_open(file, tracing_clock_show, inode->i_private);
6291 if (ret < 0)
6292 trace_array_put(tr);
6293
6294 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08006295}
6296
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006297static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6298{
6299 struct trace_array *tr = m->private;
6300
6301 mutex_lock(&trace_types_lock);
6302
6303 if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
6304 seq_puts(m, "delta [absolute]\n");
6305 else
6306 seq_puts(m, "[delta] absolute\n");
6307
6308 mutex_unlock(&trace_types_lock);
6309
6310 return 0;
6311}
6312
6313static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6314{
6315 struct trace_array *tr = inode->i_private;
6316 int ret;
6317
6318 if (tracing_disabled)
6319 return -ENODEV;
6320
6321 if (trace_array_get(tr))
6322 return -ENODEV;
6323
6324 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6325 if (ret < 0)
6326 trace_array_put(tr);
6327
6328 return ret;
6329}
6330
Tom Zanussi00b41452018-01-15 20:51:39 -06006331int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6332{
6333 int ret = 0;
6334
6335 mutex_lock(&trace_types_lock);
6336
6337 if (abs && tr->time_stamp_abs_ref++)
6338 goto out;
6339
6340 if (!abs) {
6341 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6342 ret = -EINVAL;
6343 goto out;
6344 }
6345
6346 if (--tr->time_stamp_abs_ref)
6347 goto out;
6348 }
6349
6350 ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
6351
6352#ifdef CONFIG_TRACER_MAX_TRACE
6353 if (tr->max_buffer.buffer)
6354 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6355#endif
6356 out:
6357 mutex_unlock(&trace_types_lock);
6358
6359 return ret;
6360}
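/*
 * Note on the pairing above: the time-stamp-mode file is read-only, so
 * absolute mode is not entered by writing it. Kernel users that need
 * absolute timestamps (hist triggers, for example) call
 * tracing_set_time_stamp_abs(tr, true) and later drop the reference
 * with (tr, false); the buffers only return to delta mode when
 * time_stamp_abs_ref falls back to zero.
 */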
6361
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006362struct ftrace_buffer_info {
6363 struct trace_iterator iter;
6364 void *spare;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006365 unsigned int spare_cpu;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006366 unsigned int read;
6367};
6368
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006369#ifdef CONFIG_TRACER_SNAPSHOT
6370static int tracing_snapshot_open(struct inode *inode, struct file *file)
6371{
Oleg Nesterov6484c712013-07-23 17:26:10 +02006372 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006373 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006374 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006375 int ret = 0;
6376
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006377 if (trace_array_get(tr) < 0)
6378 return -ENODEV;
6379
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006380 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02006381 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006382 if (IS_ERR(iter))
6383 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006384 } else {
6385 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006386 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006387 m = kzalloc(sizeof(*m), GFP_KERNEL);
6388 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006389 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006390 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6391 if (!iter) {
6392 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006393 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006394 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006395 ret = 0;
6396
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006397 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02006398 iter->trace_buffer = &tr->max_buffer;
6399 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006400 m->private = iter;
6401 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006402 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006403out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006404 if (ret < 0)
6405 trace_array_put(tr);
6406
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006407 return ret;
6408}
6409
6410static ssize_t
6411tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6412 loff_t *ppos)
6413{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006414 struct seq_file *m = filp->private_data;
6415 struct trace_iterator *iter = m->private;
6416 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006417 unsigned long val;
6418 int ret;
6419
6420 ret = tracing_update_buffers();
6421 if (ret < 0)
6422 return ret;
6423
6424 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6425 if (ret)
6426 return ret;
6427
6428 mutex_lock(&trace_types_lock);
6429
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006430 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006431 ret = -EBUSY;
6432 goto out;
6433 }
6434
6435 switch (val) {
6436 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006437 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6438 ret = -EINVAL;
6439 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006440 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04006441 if (tr->allocated_snapshot)
6442 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006443 break;
6444 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006445/* Only allow per-cpu swap if the ring buffer supports it */
6446#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6447 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6448 ret = -EINVAL;
6449 break;
6450 }
6451#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05006452 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04006453 ret = alloc_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006454 if (ret < 0)
6455 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006456 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006457 local_irq_disable();
6458 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006459 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006460 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006461 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006462 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006463 local_irq_enable();
6464 break;
6465 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05006466 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006467 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6468 tracing_reset_online_cpus(&tr->max_buffer);
6469 else
6470 tracing_reset(&tr->max_buffer, iter->cpu_file);
6471 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006472 break;
6473 }
6474
6475 if (ret >= 0) {
6476 *ppos += cnt;
6477 ret = cnt;
6478 }
6479out:
6480 mutex_unlock(&trace_types_lock);
6481 return ret;
6482}
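/*
 * Illustrative summary of the write semantics implemented above:
 *
 *	value	snapshot unallocated	snapshot allocated
 *	0	(nothing)		free the snapshot buffer
 *	1	allocate and swap	swap main <-> snapshot buffer
 *	other	(nothing)		clear the snapshot contents
 *
 * e.g. "echo 1 > snapshot" captures the live trace, which can then be
 * read back from the same file; the per-cpu snapshot files do the same
 * for a single CPU where CONFIG_RING_BUFFER_ALLOW_SWAP permits it.
 */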
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006483
6484static int tracing_snapshot_release(struct inode *inode, struct file *file)
6485{
6486 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006487 int ret;
6488
6489 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006490
6491 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006492 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006493
6494 /* If write only, the seq_file is just a stub */
6495 if (m)
6496 kfree(m->private);
6497 kfree(m);
6498
6499 return 0;
6500}
6501
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006502static int tracing_buffers_open(struct inode *inode, struct file *filp);
6503static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6504 size_t count, loff_t *ppos);
6505static int tracing_buffers_release(struct inode *inode, struct file *file);
6506static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6507 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6508
6509static int snapshot_raw_open(struct inode *inode, struct file *filp)
6510{
6511 struct ftrace_buffer_info *info;
6512 int ret;
6513
6514 ret = tracing_buffers_open(inode, filp);
6515 if (ret < 0)
6516 return ret;
6517
6518 info = filp->private_data;
6519
6520 if (info->iter.trace->use_max_tr) {
6521 tracing_buffers_release(inode, filp);
6522 return -EBUSY;
6523 }
6524
6525 info->iter.snapshot = true;
6526 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6527
6528 return ret;
6529}
6530
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006531#endif /* CONFIG_TRACER_SNAPSHOT */
6532
6533
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006534static const struct file_operations tracing_thresh_fops = {
6535 .open = tracing_open_generic,
6536 .read = tracing_thresh_read,
6537 .write = tracing_thresh_write,
6538 .llseek = generic_file_llseek,
6539};
6540
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04006541#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006542static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006543 .open = tracing_open_generic,
6544 .read = tracing_max_lat_read,
6545 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006546 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006547};
Chen Gange428abb2015-11-10 05:15:15 +08006548#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006549
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006550static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006551 .open = tracing_open_generic,
6552 .read = tracing_set_trace_read,
6553 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006554 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006555};
6556
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006557static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006558 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006559 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006560 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006561 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006562 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006563 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02006564};
6565
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006566static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006567 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02006568 .read = tracing_entries_read,
6569 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006570 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006571 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02006572};
6573
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006574static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006575 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006576 .read = tracing_total_entries_read,
6577 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006578 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006579};
6580
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006581static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006582 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006583 .write = tracing_free_buffer_write,
6584 .release = tracing_free_buffer_release,
6585};
6586
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006587static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006588 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006589 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006590 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006591 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006592};
6593
Steven Rostedtfa32e852016-07-06 15:25:08 -04006594static const struct file_operations tracing_mark_raw_fops = {
6595 .open = tracing_open_generic_tr,
6596 .write = tracing_mark_raw_write,
6597 .llseek = generic_file_llseek,
6598 .release = tracing_release_generic_tr,
6599};
6600
Zhaolei5079f322009-08-25 16:12:56 +08006601static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08006602 .open = tracing_clock_open,
6603 .read = seq_read,
6604 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006605 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08006606 .write = tracing_clock_write,
6607};
6608
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006609static const struct file_operations trace_time_stamp_mode_fops = {
6610 .open = tracing_time_stamp_mode_open,
6611 .read = seq_read,
6612 .llseek = seq_lseek,
6613 .release = tracing_single_release_tr,
6614};
6615
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006616#ifdef CONFIG_TRACER_SNAPSHOT
6617static const struct file_operations snapshot_fops = {
6618 .open = tracing_snapshot_open,
6619 .read = seq_read,
6620 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05006621 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006622 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006623};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006624
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006625static const struct file_operations snapshot_raw_fops = {
6626 .open = snapshot_raw_open,
6627 .read = tracing_buffers_read,
6628 .release = tracing_buffers_release,
6629 .splice_read = tracing_buffers_splice_read,
6630 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006631};
6632
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006633#endif /* CONFIG_TRACER_SNAPSHOT */
6634
Steven Rostedt2cadf912008-12-01 22:20:19 -05006635static int tracing_buffers_open(struct inode *inode, struct file *filp)
6636{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006637 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006638 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006639 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006640
6641 if (tracing_disabled)
6642 return -ENODEV;
6643
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006644 if (trace_array_get(tr) < 0)
6645 return -ENODEV;
6646
Steven Rostedt2cadf912008-12-01 22:20:19 -05006647 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006648 if (!info) {
6649 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006650 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006651 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006652
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006653 mutex_lock(&trace_types_lock);
6654
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006655 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006656 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05006657 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006658 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006659 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006660 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006661 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006662
6663 filp->private_data = info;
6664
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006665 tr->current_trace->ref++;
6666
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006667 mutex_unlock(&trace_types_lock);
6668
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006669 ret = nonseekable_open(inode, filp);
6670 if (ret < 0)
6671 trace_array_put(tr);
6672
6673 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006674}
6675
Al Viro9dd95742017-07-03 00:42:43 -04006676static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006677tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6678{
6679 struct ftrace_buffer_info *info = filp->private_data;
6680 struct trace_iterator *iter = &info->iter;
6681
6682 return trace_poll(iter, filp, poll_table);
6683}
6684
Steven Rostedt2cadf912008-12-01 22:20:19 -05006685static ssize_t
6686tracing_buffers_read(struct file *filp, char __user *ubuf,
6687 size_t count, loff_t *ppos)
6688{
6689 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006690 struct trace_iterator *iter = &info->iter;
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04006691 ssize_t ret = 0;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006692 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006693
Steven Rostedt2dc5d122009-03-04 19:10:05 -05006694 if (!count)
6695 return 0;
6696
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006697#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006698 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6699 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006700#endif
6701
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006702 if (!info->spare) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006703 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6704 iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04006705 if (IS_ERR(info->spare)) {
6706 ret = PTR_ERR(info->spare);
6707 info->spare = NULL;
6708 } else {
6709 info->spare_cpu = iter->cpu_file;
6710 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006711 }
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006712 if (!info->spare)
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04006713 return ret;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006714
Steven Rostedt2cadf912008-12-01 22:20:19 -05006715 /* Do we have previous read data to read? */
6716 if (info->read < PAGE_SIZE)
6717 goto read;
6718
Steven Rostedtb6273442013-02-28 13:44:11 -05006719 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006720 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006721 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006722 &info->spare,
6723 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006724 iter->cpu_file, 0);
6725 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05006726
6727 if (ret < 0) {
6728 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006729 if ((filp->f_flags & O_NONBLOCK))
6730 return -EAGAIN;
6731
Rabin Vincente30f53a2014-11-10 19:46:34 +01006732 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006733 if (ret)
6734 return ret;
6735
Steven Rostedtb6273442013-02-28 13:44:11 -05006736 goto again;
6737 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006738 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05006739 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006740
Steven Rostedt436fc282011-10-14 10:44:25 -04006741 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05006742 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05006743 size = PAGE_SIZE - info->read;
6744 if (size > count)
6745 size = count;
6746
6747 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006748 if (ret == size)
6749 return -EFAULT;
6750
Steven Rostedt2dc5d122009-03-04 19:10:05 -05006751 size -= ret;
6752
Steven Rostedt2cadf912008-12-01 22:20:19 -05006753 *ppos += size;
6754 info->read += size;
6755
6756 return size;
6757}
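/*
 * Illustrative usage sketch: this read handler backs the per-cpu
 * "trace_pipe_raw" files and returns raw ring-buffer pages, so
 * consumers normally read page-sized chunks and parse the binary page
 * format themselves, e.g. (paths and block size illustrative):
 *
 *	dd if=/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw \
 *	   of=trace.dat bs=4096 count=16
 */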
6758
6759static int tracing_buffers_release(struct inode *inode, struct file *file)
6760{
6761 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006762 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006763
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006764 mutex_lock(&trace_types_lock);
6765
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006766 iter->tr->current_trace->ref--;
6767
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006768 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006769
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006770 if (info->spare)
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006771 ring_buffer_free_read_page(iter->trace_buffer->buffer,
6772 info->spare_cpu, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006773 kfree(info);
6774
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006775 mutex_unlock(&trace_types_lock);
6776
Steven Rostedt2cadf912008-12-01 22:20:19 -05006777 return 0;
6778}
6779
6780struct buffer_ref {
6781 struct ring_buffer *buffer;
6782 void *page;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006783 int cpu;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006784 int ref;
6785};
6786
6787static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6788 struct pipe_buffer *buf)
6789{
6790 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6791
6792 if (--ref->ref)
6793 return;
6794
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006795 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006796 kfree(ref);
6797 buf->private = 0;
6798}
6799
Steven Rostedt2cadf912008-12-01 22:20:19 -05006800static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6801 struct pipe_buffer *buf)
6802{
6803 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6804
6805 ref->ref++;
6806}
6807
6808/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08006809static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05006810 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006811 .confirm = generic_pipe_buf_confirm,
6812 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09006813 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006814 .get = buffer_pipe_buf_get,
6815};
6816
6817/*
6818 * Callback from splice_to_pipe(): release some pages at the end of
6819 * the spd in case we errored out while filling the pipe.
6820 */
6821static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6822{
6823 struct buffer_ref *ref =
6824 (struct buffer_ref *)spd->partial[i].private;
6825
6826 if (--ref->ref)
6827 return;
6828
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006829 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006830 kfree(ref);
6831 spd->partial[i].private = 0;
6832}
6833
6834static ssize_t
6835tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6836 struct pipe_inode_info *pipe, size_t len,
6837 unsigned int flags)
6838{
6839 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006840 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02006841 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6842 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05006843 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02006844 .pages = pages_def,
6845 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02006846 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006847 .ops = &buffer_pipe_buf_ops,
6848 .spd_release = buffer_spd_release,
6849 };
6850 struct buffer_ref *ref;
Steven Rostedt (VMware)6b7e6332017-12-22 20:38:57 -05006851 int entries, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01006852 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006853
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006854#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006855 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6856 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006857#endif
6858
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006859 if (*ppos & (PAGE_SIZE - 1))
6860 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08006861
6862 if (len & (PAGE_SIZE - 1)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006863 if (len < PAGE_SIZE)
6864 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08006865 len &= PAGE_MASK;
6866 }
6867
Al Viro1ae22932016-09-17 18:31:46 -04006868 if (splice_grow_spd(pipe, &spd))
6869 return -ENOMEM;
6870
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006871 again:
6872 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006873 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04006874
Al Viroa786c062014-04-11 12:01:03 -04006875 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05006876 struct page *page;
6877 int r;
6878
6879 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01006880 if (!ref) {
6881 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006882 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01006883 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006884
Steven Rostedt7267fa62009-04-29 00:16:21 -04006885 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006886 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006887 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04006888 if (IS_ERR(ref->page)) {
6889 ret = PTR_ERR(ref->page);
6890 ref->page = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006891 kfree(ref);
6892 break;
6893 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006894 ref->cpu = iter->cpu_file;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006895
6896 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006897 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006898 if (r < 0) {
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006899 ring_buffer_free_read_page(ref->buffer, ref->cpu,
6900 ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006901 kfree(ref);
6902 break;
6903 }
6904
Steven Rostedt2cadf912008-12-01 22:20:19 -05006905 page = virt_to_page(ref->page);
6906
6907 spd.pages[i] = page;
6908 spd.partial[i].len = PAGE_SIZE;
6909 spd.partial[i].offset = 0;
6910 spd.partial[i].private = (unsigned long)ref;
6911 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08006912 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04006913
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006914 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006915 }
6916
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006917 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006918 spd.nr_pages = i;
6919
6920 /* did we read anything? */
6921 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01006922 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04006923 goto out;
Rabin Vincent07906da2014-11-06 22:26:07 +01006924
Al Viro1ae22932016-09-17 18:31:46 -04006925 ret = -EAGAIN;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006926 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
Al Viro1ae22932016-09-17 18:31:46 -04006927 goto out;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006928
Rabin Vincente30f53a2014-11-10 19:46:34 +01006929 ret = wait_on_pipe(iter, true);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006930 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04006931 goto out;
Rabin Vincente30f53a2014-11-10 19:46:34 +01006932
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006933 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006934 }
6935
6936 ret = splice_to_pipe(pipe, &spd);
Al Viro1ae22932016-09-17 18:31:46 -04006937out:
Eric Dumazet047fe362012-06-12 15:24:40 +02006938 splice_shrink_spd(&spd);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006939
Steven Rostedt2cadf912008-12-01 22:20:19 -05006940 return ret;
6941}
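/*
 * Illustrative userspace sketch: since this file implements
 * .splice_read, ring-buffer pages can be moved into a pipe without a
 * copy through a user buffer. Note the constraints enforced above:
 * *ppos must be page aligned, and len is rounded down to whole pages
 * (less than one page is -EINVAL):
 *
 *	int fd = open(".../per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
 *	loff_t off = 0;
 *	splice(fd, &off, pipefd[1], NULL, 16 * 4096, SPLICE_F_NONBLOCK);
 */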
6942
6943static const struct file_operations tracing_buffers_fops = {
6944 .open = tracing_buffers_open,
6945 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006946 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006947 .release = tracing_buffers_release,
6948 .splice_read = tracing_buffers_splice_read,
6949 .llseek = no_llseek,
6950};
6951
Steven Rostedtc8d77182009-04-29 18:03:45 -04006952static ssize_t
6953tracing_stats_read(struct file *filp, char __user *ubuf,
6954 size_t count, loff_t *ppos)
6955{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006956 struct inode *inode = file_inode(filp);
6957 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006958 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006959 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006960 struct trace_seq *s;
6961 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006962 unsigned long long t;
6963 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04006964
Li Zefane4f2d102009-06-15 10:57:28 +08006965 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006966 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01006967 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04006968
6969 trace_seq_init(s);
6970
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006971 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006972 trace_seq_printf(s, "entries: %ld\n", cnt);
6973
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006974 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006975 trace_seq_printf(s, "overrun: %ld\n", cnt);
6976
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006977 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006978 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
6979
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006980 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006981 trace_seq_printf(s, "bytes: %ld\n", cnt);
6982
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09006983 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006984 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006985 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006986 usec_rem = do_div(t, USEC_PER_SEC);
6987 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
6988 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006989
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006990 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006991 usec_rem = do_div(t, USEC_PER_SEC);
6992 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
6993 } else {
6994 /* counter or tsc mode for trace_clock */
6995 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006996 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08006997
6998 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006999 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007000 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007001
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007002 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07007003 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7004
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007005 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05007006 trace_seq_printf(s, "read events: %ld\n", cnt);
7007
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05007008 count = simple_read_from_buffer(ubuf, count, ppos,
7009 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04007010
7011 kfree(s);
7012
7013 return count;
7014}
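/*
 * Illustrative output of a per-cpu "stats" file, matching the
 * trace_seq_printf() calls above (numbers made up):
 *
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 5632
 *	oldest event ts:  2465.813796
 *	now ts:  2477.307888
 *	dropped events: 0
 *	read events: 129
 */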
7015
7016static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007017 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04007018 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007019 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007020 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04007021};
7022
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007023#ifdef CONFIG_DYNAMIC_FTRACE
7024
7025static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007026tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007027 size_t cnt, loff_t *ppos)
7028{
7029 unsigned long *p = filp->private_data;
Steven Rostedt (VMware)6a9c9812017-06-27 11:02:49 -04007030 char buf[64]; /* Not too big for a shallow stack */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007031 int r;
7032
Steven Rostedt (VMware)6a9c9812017-06-27 11:02:49 -04007033 r = scnprintf(buf, 63, "%ld", *p);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007034 buf[r++] = '\n';
7035
Steven Rostedt (VMware)6a9c9812017-06-27 11:02:49 -04007036 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007037}
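/*
 * Illustrative usage sketch: this handler is typically wired up (later
 * in this file) as "dyn_ftrace_total_info", with i_private pointing at
 * the count of patched functions, so reading yields a single number:
 *
 *	cat /sys/kernel/tracing/dyn_ftrace_total_info
 *	53522
 */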
7038
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007039static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007040 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007041 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007042 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007043};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007044#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007045
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007046#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7047static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04007048ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007049 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007050 void *data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007051{
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04007052 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007053}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007054
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007055static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04007056ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007057 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007058 void *data)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007059{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007060 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007061 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007062
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007063 if (mapper)
7064 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007065
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007066 if (count) {
7067
7068 if (*count <= 0)
7069 return;
7070
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007071 (*count)--;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007072 }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007073
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04007074 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007075}
7076
7077static int
7078ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7079 struct ftrace_probe_ops *ops, void *data)
7080{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007081 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007082 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007083
7084 seq_printf(m, "%ps:", (void *)ip);
7085
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01007086 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007087
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007088 if (mapper)
7089 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7090
7091 if (count)
7092 seq_printf(m, ":count=%ld\n", *count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007093 else
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007094 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007095
7096 return 0;
7097}
7098
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007099static int
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007100ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007101 unsigned long ip, void *init_data, void **data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007102{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007103 struct ftrace_func_mapper *mapper = *data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007104
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007105 if (!mapper) {
7106 mapper = allocate_ftrace_func_mapper();
7107 if (!mapper)
7108 return -ENOMEM;
7109 *data = mapper;
7110 }
7111
7112 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007113}
7114
7115static void
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007116ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007117 unsigned long ip, void *data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007118{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007119 struct ftrace_func_mapper *mapper = data;
7120
7121 if (!ip) {
7122 if (!mapper)
7123 return;
7124 free_ftrace_func_mapper(mapper, NULL);
7125 return;
7126 }
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007127
7128 ftrace_func_mapper_remove_ip(mapper, ip);
7129}
7130
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007131static struct ftrace_probe_ops snapshot_probe_ops = {
7132 .func = ftrace_snapshot,
7133 .print = ftrace_snapshot_print,
7134};
7135
7136static struct ftrace_probe_ops snapshot_count_probe_ops = {
7137 .func = ftrace_count_snapshot,
7138 .print = ftrace_snapshot_print,
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007139 .init = ftrace_snapshot_init,
7140 .free = ftrace_snapshot_free,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007141};
7142
7143static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04007144ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007145 char *glob, char *cmd, char *param, int enable)
7146{
7147 struct ftrace_probe_ops *ops;
7148 void *count = (void *)-1;
7149 char *number;
7150 int ret;
7151
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -04007152 if (!tr)
7153 return -ENODEV;
7154
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007155 /* hash funcs only work with set_ftrace_filter */
7156 if (!enable)
7157 return -EINVAL;
7158
7159 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7160
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04007161 if (glob[0] == '!')
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04007162 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007163
7164 if (!param)
7165 goto out_reg;
7166
7167 number = strsep(&param, ":");
7168
7169 if (!strlen(number))
7170 goto out_reg;
7171
7172 /*
7173 * We use the callback data field (which is a pointer)
7174 * as our counter.
7175 */
7176 ret = kstrtoul(number, 0, (unsigned long *)&count);
7177 if (ret)
7178 return ret;
7179
7180 out_reg:
Linus Torvalds4c174682017-05-03 18:41:21 -07007181 ret = alloc_snapshot(tr);
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04007182 if (ret < 0)
7183 goto out;
7184
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04007185 ret = register_ftrace_function_probe(glob, tr, ops, count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007186
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04007187 out:
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007188 return ret < 0 ? ret : 0;
7189}
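/*
 * Illustrative usage of the "snapshot" function command parsed above,
 * written to set_ftrace_filter (the function name is just an example):
 *
 *	echo 'do_sys_open:snapshot' > set_ftrace_filter    # every hit
 *	echo 'do_sys_open:snapshot:5' > set_ftrace_filter  # 5 hits max
 *	echo '!do_sys_open:snapshot' > set_ftrace_filter   # remove probe
 */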
7190
7191static struct ftrace_func_command ftrace_snapshot_cmd = {
7192 .name = "snapshot",
7193 .func = ftrace_trace_snapshot_callback,
7194};
7195
Tom Zanussi38de93a2013-10-24 08:34:18 -05007196static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007197{
7198 return register_ftrace_command(&ftrace_snapshot_cmd);
7199}
7200#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05007201static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007202#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007203
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007204static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007205{
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007206 if (WARN_ON(!tr->dir))
7207 return ERR_PTR(-ENODEV);
7208
7209 /* Top directory uses NULL as the parent */
7210 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7211 return NULL;
7212
7213 /* All sub buffers have a descriptor */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007214 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007215}
7216
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007217static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7218{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007219 struct dentry *d_tracer;
7220
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007221 if (tr->percpu_dir)
7222 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007223
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007224 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05007225 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007226 return NULL;
7227
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007228 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007229
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007230 WARN_ONCE(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007231 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007232
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007233 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007234}
7235
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007236static struct dentry *
7237trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7238 void *data, long cpu, const struct file_operations *fops)
7239{
7240 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7241
7242 if (ret) /* See tracing_get_cpu() */
David Howells7682c912015-03-17 22:26:16 +00007243 d_inode(ret)->i_cdev = (void *)(cpu + 1);
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007244 return ret;
7245}
7246
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007247static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007248tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007249{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007250 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007251 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04007252 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007253
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09007254 if (!d_percpu)
7255 return;
7256
Steven Rostedtdd49a382010-10-20 21:51:26 -04007257 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007258 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01007259 if (!d_cpu) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07007260 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01007261 return;
7262 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007263
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01007264 /* per cpu trace_pipe */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007265 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02007266 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007267
7268 /* per cpu trace */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007269 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007270 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04007271
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007272 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007273 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007274
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007275 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007276 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08007277
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007278 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007279 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007280
7281#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007282 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007283 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007284
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007285 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007286 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007287#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007288}
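/*
 * Illustrative tree created above for each CPU (the snapshot files
 * only with CONFIG_TRACER_SNAPSHOT):
 *
 *	per_cpu/cpu0/trace_pipe
 *	per_cpu/cpu0/trace
 *	per_cpu/cpu0/trace_pipe_raw
 *	per_cpu/cpu0/stats
 *	per_cpu/cpu0/buffer_size_kb
 *	per_cpu/cpu0/snapshot
 *	per_cpu/cpu0/snapshot_raw
 */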
7289
Steven Rostedt60a11772008-05-12 21:20:44 +02007290#ifdef CONFIG_FTRACE_SELFTEST
7291/* Let selftest have access to static functions in this file */
7292#include "trace_selftest.c"
7293#endif
7294
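
/*
 * "options/<opt>" file callbacks for tracer-specific options
 * (struct tracer_opt): reading reports whether the option bit is set,
 * writing 0 or 1 clears or sets it via __set_tracer_option().
 */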
static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

/*
 * A core trace option file must carry both the trace_array descriptor and
 * the index of the flag bit it represents. To encode both in the single
 * pointer handed to the file callbacks, the trace_array has a character
 * array, trace_flags_index[], in which each element holds its own index:
 * index[0] == 0, index[1] == 1, etc. The address of the element for a
 * given flag is what gets passed to the read/write callbacks.
 *
 * get_tr_index() recovers both values from that pointer:
 *
 *	idx = *ptr;
 *
 * works because the pointed-to byte holds its own index (remember,
 * index[1] == 1). Subtracting that index from the pointer then lands on
 * the start of the array:
 *
 *	ptr - idx == &index[0]
 *
 * and a simple container_of() from there gets us back to the trace_array
 * descriptor. Concretely, if ptr == &tr->trace_flags_index[3], then
 * idx == 3 and ptr - 3 == tr->trace_flags_index, from which
 * container_of() yields tr.
 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;

	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	char *buf;

	get_tr_index(tr_index, &tr, &index);

	if (tr->trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};
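
/*
 * Wrapper around tracefs_create_file() that warns when the entry could
 * not be created. Returns NULL on failure.
 */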
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warn("Could not create tracefs '%s' entry\n", name);

	return ret;
}
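
/*
 * Return the "options" directory of a trace array, creating it on first
 * use. Returns NULL if the tracing directory is not available.
 */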
static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warn("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}
static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/* Make sure there are no duplicate flags. */
		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
			return;
	}

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		WARN_ONCE(topts[cnt].entry == NULL,
			  "Failed to create trace option: %s",
			  opts[cnt].name);
	}
}
static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options,
				 (void *)&tr->trace_flags_index[index],
				 &trace_options_core_fops);
}

static void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	bool top_level = tr == &global_trace;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		if (top_level ||
		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
			create_trace_option_core_file(tr, trace_options[i], i);
	}
}
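
/*
 * "tracing_on" file support: rb_simple_read() reports whether the ring
 * buffer is currently recording; rb_simple_write() turns recording on or
 * off and runs the current tracer's start()/stop() callbacks to match.
 * From user space, "echo 0 > tracing_on" quiesces the buffer without
 * tearing down any tracer state; "echo 1 > tracing_on" resumes recording.
 */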
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};

struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
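
/*
 * Allocate one trace_buffer: the ring buffer itself plus its per-CPU
 * trace_array_cpu data. Returns 0 on success or -ENOMEM.
 */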
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		tr->trace_buffer.buffer = NULL;
		free_percpu(tr->trace_buffer.data);
		tr->trace_buffer.data = NULL;
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}

static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}
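
/*
 * mkdir callback for the "instances" directory: create a new trace_array,
 * with its own ring buffers, event files and option files, named after
 * the directory being made.
 */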
static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);
	INIT_LIST_HEAD(&tr->hist_vars);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	ftrace_init_trace_array(tr);

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;
}
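
/*
 * rmdir callback for the "instances" directory: tear down the named
 * instance. Fails with -EBUSY while the instance or its current tracer
 * is still referenced.
 */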
static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;
	int i;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	/* Disable all the flags that were enabled coming in */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
		if ((1 << i) & ZEROED_TRACE_FLAGS)
			set_tracer_flag(tr, 1 << i, 0);
	}

	tracing_set_nop(tr);
	clear_ftrace_function_probes(tr);
	event_trace_del_tracer(tr);
	ftrace_clear_pids(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;
}

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}
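
/*
 * Populate a trace array's tracefs directory with the standard control
 * files (current_tracer, trace, trace_pipe, tracing_on, ...), the options
 * directory and the per-CPU subdirectories.
 */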
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_marker_raw", 0220, d_tracer,
			  tr, &tracing_mark_raw_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	trace_create_file("timestamp_mode", 0444, d_tracer, tr,
			  &trace_time_stamp_mode_fops);

	create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}

static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!tracefs_initialized()) ||
		(IS_ENABLED(CONFIG_DEBUG_FS) &&
		 WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return NULL;
}

extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

static void __init trace_eval_init(void)
{
	int len;

	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}

#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created; do
	 * not bother with their eval maps either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	mutex_lock(&trace_eval_mutex);

	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	init_tracer_tracefs(&global_trace, d_tracer);
	ftrace_init_tracefs_toplevel(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", 0444, d_tracer,
			  NULL, &tracing_saved_tgids_fops);

	trace_eval_init();

	trace_create_eval_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}
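
/*
 * Panic/die notifiers: if ftrace_dump_on_oops is set, dump the ftrace
 * ring buffer to the console before the system goes down.
 */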
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.next		= NULL,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is limited to 1024 characters; we really don't need it
 * that big. Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG
void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be NUL-terminated, but we are paranoid */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
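
/*
 * Prime an iterator for walking the global trace buffer; used by
 * ftrace_dump() and the kdb trace-dump helper.
 */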
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We read everything we can, and then
	 * release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
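
/*
 * Split a command string into an argv[] vector and hand it to createfn().
 * The dynamic event interfaces (e.g. kprobe_events) use this to parse one
 * definition at a time.
 */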
int trace_run_command(const char *buf, int (*createfn)(int, char **))
{
	char **argv;
	int argc, ret;

	argc = 0;
	ret = 0;
	argv = argv_split(GFP_KERNEL, buf, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = createfn(argc, argv);

	argv_free(argv);

	return ret;
}

#define WRITE_BUFSIZE  4096
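
/*
 * Copy command text from user space in WRITE_BUFSIZE chunks, strip '#'
 * comments, split on newlines and feed each complete line to
 * trace_run_command().
 */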
ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(int, char **))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = trace_run_command(buf, createfn);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
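
/*
 * Boot-time initialization of the global trace array: allocate the
 * cpumasks and ring buffers, register the nop tracer and hook up the
 * panic and die notifiers.
 */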
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callback allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}

void __init trace_init(void)
{
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default boot-up tracer string lives in an init section.
	 * This function runs as a late initcall: if the boot tracer
	 * was never registered, clear the pointer out so that later
	 * registration cannot access memory that is about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);