/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will peek into the ring buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
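
/*
 * For example, the parser above accepts (matching the doc comment on
 * ftrace_dump_on_oops):
 *
 *	ftrace_dump_on_oops		-> DUMP_ALL  (dump every CPU buffer)
 *	ftrace_dump_on_oops=orig_cpu	-> DUMP_ORIG (only the oopsing CPU)
 */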

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
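
/*
 * Example: the "+ 500" above rounds to the nearest microsecond instead
 * of truncating, so ns2usecs(1499) == 1 while ns2usecs(1500) == 2.
 */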

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
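
/*
 * A sketch of how callers are expected to pair these (the caller code
 * is illustrative, not from this file):
 *
 *	if (trace_array_get(tr) < 0)
 *		return -ENODEV;		(tr was removed from the list)
 *	...				(tr cannot be freed here)
 *	trace_array_put(tr);
 */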

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
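
/*
 * A minimal sketch of how the three helpers above wire into a seq_file;
 * the ops structure, p_stop(), and the my_pid_list source are
 * illustrative, not defined in this file:
 *
 *	static void *p_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(my_pid_list, pos);
 *	}
 *
 *	static void *p_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(my_pid_list, v, pos);
 *	}
 *
 *	static const struct seq_operations my_pid_seq_ops = {
 *		.start	= p_start,
 *		.next	= p_next,
 *		.stop	= p_stop,
 *		.show	= trace_pid_show,
 *	};
 */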

/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list) {
		/* the parser's buffer must be freed on this error path too */
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		trace_parser_put(&parser);
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		parser.buffer[parser.idx] = 0;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}
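
/*
 * This is what backs writes to the pid-filter files; e.g., assuming
 * tracefs is mounted at /sys/kernel/tracing, something like:
 *
 *	echo "123 456" > set_event_pid	(replace the list with two pids)
 *	echo > set_event_pid		(empty write: clear the list)
 *
 * exercises the all-or-nothing replacement above, with the empty write
 * hitting the !nr_pids path that drops the list entirely.
 */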

static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * Serialize access to the ring buffer.
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (which are returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different CPU ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
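
/*
 * The consumer-side pattern is then (a sketch; the consuming step
 * depends on the particular reader):
 *
 *	trace_access_lock(cpu);
 *	...			(read or consume events of @cpu)
 *	trace_access_unlock(cpu);
 *
 * where cpu == RING_BUFFER_ALL_CPUS takes the access lock exclusively
 * against all per-cpu readers.
 */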

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, flags, pc);
	ent->type = type;
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct ring_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
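
/*
 * Note: callers normally reach these through the trace_puts() macro
 * (found in linux/kernel.h at this vintage), which passes _THIS_IP_ and
 * picks __trace_bputs() for build-time constant strings, falling back
 * to __trace_puts() otherwise, e.g.:
 *
 *	trace_puts("reached the slow path\n");
 */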

#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance(struct trace_array *tr)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
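
/*
 * A sketch of the intended in-kernel use (the trigger condition is
 * illustrative): allocate once from a context that may sleep, then
 * snapshot wherever the interesting condition fires:
 *
 *	if (tracing_alloc_snapshot() < 0)
 *		return;
 *	...
 *	if (looks_wrong)
 *		tracing_snapshot();
 */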

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
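
/*
 * Example: booting with "tracing_thresh=100" stores 100000 here, i.e.
 * the command line takes microseconds while tracing_thresh itself is
 * kept in nanoseconds.
 */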

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};
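
/*
 * For example, a TRACE_FLAGS entry such as C(PRINT_PARENT, "print-parent")
 * expands to the string "print-parent" here, while trace.h redefines
 * C(a, b) to build the matching TRACE_ITER_PRINT_PARENT bit, keeping the
 * strings and the bit masks in the same order by construction.
 */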

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};
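
/*
 * The "name" column is what the trace_clock file shows and accepts;
 * e.g., assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *	cat /sys/kernel/tracing/trace_clock
 *	echo mono > /sys/kernel/tracing/trace_clock
 */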

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}
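
/*
 * Typical lifecycle, as used by trace_pid_write() above:
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
 *		return -ENOMEM;
 *	while (...)
 *		trace_get_user(&parser, ubuf, cnt, &pos);
 *	trace_parser_put(&parser);
 */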

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
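
/*
 * Example: a write of "123 456\n" is consumed one token per call. The
 * first call fills parser->buffer with "123" and returns 4 (the token
 * plus its delimiting space); the second fills it with "456". If the
 * user buffer ends mid-token, parser->cont is set so the next call
 * continues the same token instead of skipping leading spaces.
 */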
1282
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001283/* TODO add a seq_buf_to_buffer() */
Dmitri Vorobievb8b94262009-03-22 19:11:11 +02001284static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001285{
1286 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001287
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001288 if (trace_seq_used(s) <= s->seq.readpos)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001289 return -EBUSY;
1290
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001291 len = trace_seq_used(s) - s->seq.readpos;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001292 if (cnt > len)
1293 cnt = len;
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001294 memcpy(buf, s->buffer + s->seq.readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001295
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001296 s->seq.readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001297 return cnt;
1298}
1299
Tim Bird0e950172010-02-25 15:36:43 -08001300unsigned long __read_mostly tracing_thresh;
1301
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001302#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001303/*
1304 * Copy the new maximum trace into the separate maximum-trace
1305 * structure. (this way the maximum trace is permanently saved,
Chunyu Hu5a93bae2017-10-19 14:32:33 +08001306 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001307 */
1308static void
1309__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1310{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001311 struct trace_buffer *trace_buf = &tr->trace_buffer;
1312 struct trace_buffer *max_buf = &tr->max_buffer;
1313 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1314 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001315
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001316 max_buf->cpu = cpu;
1317 max_buf->time_start = data->preempt_timestamp;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001318
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05001319 max_data->saved_latency = tr->max_latency;
Steven Rostedt8248ac02009-09-02 12:27:41 -04001320 max_data->critical_start = data->critical_start;
1321 max_data->critical_end = data->critical_end;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001322
Arnaldo Carvalho de Melo1acaa1b2010-03-05 18:23:50 -03001323 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
Steven Rostedt8248ac02009-09-02 12:27:41 -04001324 max_data->pid = tsk->pid;
Steven Rostedt (Red Hat)f17a5192013-05-30 21:10:37 -04001325 /*
1326 * If tsk == current, then use current_uid(), as that does not use
1327 * RCU. The irq tracer can be called out of RCU scope.
1328 */
1329 if (tsk == current)
1330 max_data->uid = current_uid();
1331 else
1332 max_data->uid = task_uid(tsk);
1333
Steven Rostedt8248ac02009-09-02 12:27:41 -04001334 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1335 max_data->policy = tsk->policy;
1336 max_data->rt_priority = tsk->rt_priority;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001337
1338 /* record this tasks comm */
1339 tracing_record_cmdline(tsk);
1340}
1341
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001342/**
1343 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1344 * @tr: tracer
1345 * @tsk: the task with the latency
1346 * @cpu: The cpu that initiated the trace.
1347 *
1348 * Flip the buffers between the @tr and the max_tr and record information
1349 * about which task was the cause of this latency.
1350 */
Ingo Molnare309b412008-05-12 21:20:51 +02001351void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001352update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1353{
Steven Rostedt (Red Hat)2721e722013-03-12 11:32:32 -04001354 struct ring_buffer *buf;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001355
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001356 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001357 return;
1358
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001359 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt34600f02013-01-22 13:35:11 -05001360
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05001361 if (!tr->allocated_snapshot) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001362 /* Only the nop tracer should hit this when disabling */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001363 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001364 return;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001365 }
Steven Rostedt34600f02013-01-22 13:35:11 -05001366
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001367 arch_spin_lock(&tr->max_lock);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001368
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001369 buf = tr->trace_buffer.buffer;
1370 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1371 tr->max_buffer.buffer = buf;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001372
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001373 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001374 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001375}
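
/*
 * Example (illustrative sketch, not part of the original file): a
 * latency tracer that has just measured a new worst-case latency
 * would typically record it, with interrupts still disabled, roughly
 * like this ("delta" is a hypothetical local):
 *
 *	if (delta > tr->max_latency) {
 *		tr->max_latency = delta;
 *		update_max_tr(tr, current, smp_processor_id());
 *	}
 *
 * See the wakeup tracer for a real caller.
 */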
1376
1377/**
1378 * update_max_tr_single - only copy one trace over, and reset the rest
1379 * @tr: tracer
1380 * @tsk: task with the latency
1381 * @cpu: the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001382 *
1383 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001384 */
Ingo Molnare309b412008-05-12 21:20:51 +02001385void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001386update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1387{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001388 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001389
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001390 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001391 return;
1392
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001393 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001394 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001395 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001396 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001397 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001398 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001399
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001400 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001401
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001402 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001403
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001404 if (ret == -EBUSY) {
1405 /*
1406 * We failed to swap the buffer due to a commit taking
1407 * place on this CPU. We fail to record, but we reset
1408 * the max trace buffer (no one writes directly to it)
1409 * and flag that it failed.
1410 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001411 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001412 "Failed to swap buffers due to commit in progress\n");
1413 }
1414
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001415 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001416
1417 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001418 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001419}
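
/*
 * Example (sketch): the irqsoff-style tracers use this single-CPU
 * variant from their critical-section bookkeeping, again with
 * interrupts disabled ("delta" and "cpu" are hypothetical locals):
 *
 *	if (delta > tr->max_latency) {
 *		tr->max_latency = delta;
 *		update_max_tr_single(tr, current, cpu);
 *	}
 */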
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001420#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001421
Rabin Vincente30f53a2014-11-10 19:46:34 +01001422static int wait_on_pipe(struct trace_iterator *iter, bool full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001423{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001424 /* Iterators are static, they should be filled or empty */
1425 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001426 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001427
Rabin Vincente30f53a2014-11-10 19:46:34 +01001428 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1429 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001430}
1431
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001432#ifdef CONFIG_FTRACE_STARTUP_TEST
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001433static bool selftests_can_run;
1434
1435struct trace_selftests {
1436 struct list_head list;
1437 struct tracer *type;
1438};
1439
1440static LIST_HEAD(postponed_selftests);
1441
1442static int save_selftest(struct tracer *type)
1443{
1444 struct trace_selftests *selftest;
1445
1446 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1447 if (!selftest)
1448 return -ENOMEM;
1449
1450 selftest->type = type;
1451 list_add(&selftest->list, &postponed_selftests);
1452 return 0;
1453}
1454
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001455static int run_tracer_selftest(struct tracer *type)
1456{
1457 struct trace_array *tr = &global_trace;
1458 struct tracer *saved_tracer = tr->current_trace;
1459 int ret;
1460
1461 if (!type->selftest || tracing_selftest_disabled)
1462 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001463
1464 /*
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001465 * If a tracer registers early in boot up (before scheduling is
1466	 * initialized and such), then do not run its selftest yet.
1467	 * Instead, run it a little later in the boot process.
1468 */
1469 if (!selftests_can_run)
1470 return save_selftest(type);
1471
1472 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001473 * Run a selftest on this tracer.
1474 * Here we reset the trace buffer, and set the current
1475 * tracer to be this tracer. The tracer can then run some
1476 * internal tracing to verify that everything is in order.
1477 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001478 */
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001479 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001480
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001481 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001482
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001483#ifdef CONFIG_TRACER_MAX_TRACE
1484 if (type->use_max_tr) {
1485 /* If we expanded the buffers, make sure the max is expanded too */
1486 if (ring_buffer_expanded)
1487 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1488 RING_BUFFER_ALL_CPUS);
1489 tr->allocated_snapshot = true;
1490 }
1491#endif
1492
1493 /* the test is responsible for initializing and enabling */
1494 pr_info("Testing tracer %s: ", type->name);
1495 ret = type->selftest(type, tr);
1496 /* the test is responsible for resetting too */
1497 tr->current_trace = saved_tracer;
1498 if (ret) {
1499 printk(KERN_CONT "FAILED!\n");
1500 /* Add the warning after printing 'FAILED' */
1501 WARN_ON(1);
1502 return -1;
1503 }
1504 /* Only reset on passing, to avoid touching corrupted buffers */
1505 tracing_reset_online_cpus(&tr->trace_buffer);
1506
1507#ifdef CONFIG_TRACER_MAX_TRACE
1508 if (type->use_max_tr) {
1509 tr->allocated_snapshot = false;
1510
1511 /* Shrink the max buffer again */
1512 if (ring_buffer_expanded)
1513 ring_buffer_resize(tr->max_buffer.buffer, 1,
1514 RING_BUFFER_ALL_CPUS);
1515 }
1516#endif
1517
1518 printk(KERN_CONT "PASSED\n");
1519 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001520}
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001521
1522static __init int init_trace_selftests(void)
1523{
1524 struct trace_selftests *p, *n;
1525 struct tracer *t, **last;
1526 int ret;
1527
1528 selftests_can_run = true;
1529
1530 mutex_lock(&trace_types_lock);
1531
1532 if (list_empty(&postponed_selftests))
1533 goto out;
1534
1535 pr_info("Running postponed tracer tests:\n");
1536
1537 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1538 ret = run_tracer_selftest(p->type);
1539 /* If the test fails, then warn and remove from available_tracers */
1540 if (ret < 0) {
1541 WARN(1, "tracer: %s failed selftest, disabling\n",
1542 p->type->name);
1543 last = &trace_types;
1544 for (t = trace_types; t; t = t->next) {
1545 if (t == p->type) {
1546 *last = t->next;
1547 break;
1548 }
1549 last = &t->next;
1550 }
1551 }
1552 list_del(&p->list);
1553 kfree(p);
1554 }
1555
1556 out:
1557 mutex_unlock(&trace_types_lock);
1558
1559 return 0;
1560}
Steven Rostedtb9ef0322017-05-17 11:14:35 -04001561core_initcall(init_trace_selftests);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001562#else
1563static inline int run_tracer_selftest(struct tracer *type)
1564{
1565 return 0;
1566}
1567#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001568
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001569static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1570
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001571static void __init apply_trace_boot_options(void);
1572
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001573/**
1574 * register_tracer - register a tracer with the ftrace system.
1575 * @type: the plugin for the tracer
1576 *
1577 * Register a new plugin tracer.
1578 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001579int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001580{
1581 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001582 int ret = 0;
1583
1584 if (!type->name) {
1585 pr_info("Tracer must have a name\n");
1586 return -1;
1587 }
1588
Dan Carpenter24a461d2010-07-10 12:06:44 +02001589 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08001590 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1591 return -1;
1592 }
1593
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001594 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01001595
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001596 tracing_selftest_running = true;
1597
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001598 for (t = trace_types; t; t = t->next) {
1599 if (strcmp(type->name, t->name) == 0) {
1600 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08001601 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001602 type->name);
1603 ret = -1;
1604 goto out;
1605 }
1606 }
1607
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001608 if (!type->set_flag)
1609 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08001610 if (!type->flags) {
1611		/* allocate a dummy tracer_flags */
1612 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08001613 if (!type->flags) {
1614 ret = -ENOMEM;
1615 goto out;
1616 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08001617 type->flags->val = 0;
1618 type->flags->opts = dummy_tracer_opt;
1619 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001620 if (!type->flags->opts)
1621 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01001622
Chunyu Hud39cdd22016-03-08 21:37:01 +08001623 /* store the tracer for __set_tracer_option */
1624 type->flags->trace = type;
1625
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001626 ret = run_tracer_selftest(type);
1627 if (ret < 0)
1628 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02001629
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001630 type->next = trace_types;
1631 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001632 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02001633
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001634 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001635 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001636 mutex_unlock(&trace_types_lock);
1637
Steven Rostedtdac74942009-02-05 01:13:38 -05001638 if (ret || !default_bootup_tracer)
1639 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05001640
Li Zefanee6c2c12009-09-18 14:06:47 +08001641 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05001642 goto out_unlock;
1643
1644 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1645 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05001646 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05001647 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001648
1649 apply_trace_boot_options();
1650
Steven Rostedtdac74942009-02-05 01:13:38 -05001651 /* disable other selftests, since this will break it. */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05001652 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05001653#ifdef CONFIG_FTRACE_STARTUP_TEST
1654 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1655 type->name);
1656#endif
1657
1658 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001659 return ret;
1660}
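
/*
 * Example (sketch): a minimal built-in tracer registration. Only the
 * name is mandatory; "foo_tracer_init" and "foo_tracer_reset" are
 * hypothetical. Since register_tracer() is __init here, it can only
 * be called from boot-time code:
 *
 *	static struct tracer foo_tracer = {
 *		.name	= "foo",
 *		.init	= foo_tracer_init,
 *		.reset	= foo_tracer_reset,
 *	};
 *
 *	static int __init init_foo_tracer(void)
 *	{
 *		return register_tracer(&foo_tracer);
 *	}
 *	core_initcall(init_foo_tracer);
 */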
1661
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001662void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001663{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001664 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001665
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001666 if (!buffer)
1667 return;
1668
Steven Rostedtf6339032009-09-04 12:35:16 -04001669 ring_buffer_record_disable(buffer);
1670
1671 /* Make sure all commits have finished */
1672 synchronize_sched();
Steven Rostedt68179682012-05-08 20:57:53 -04001673 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001674
1675 ring_buffer_record_enable(buffer);
1676}
1677
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001678void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001679{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001680 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001681 int cpu;
1682
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001683 if (!buffer)
1684 return;
1685
Steven Rostedt621968c2009-09-04 12:02:35 -04001686 ring_buffer_record_disable(buffer);
1687
1688 /* Make sure all commits have finished */
1689 synchronize_sched();
1690
Alexander Z Lam94571582013-08-02 18:36:16 -07001691 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001692
1693 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001694 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001695
1696 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001697}
1698
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001699/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001700void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001701{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001702 struct trace_array *tr;
1703
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001704 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (VMware)065e63f2017-08-31 17:03:47 -04001705 if (!tr->clear_trace)
1706 continue;
1707 tr->clear_trace = false;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001708 tracing_reset_online_cpus(&tr->trace_buffer);
1709#ifdef CONFIG_TRACER_MAX_TRACE
1710 tracing_reset_online_cpus(&tr->max_buffer);
1711#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001712 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001713}
1714
Joel Fernandesd914ba32017-06-26 19:01:55 -07001715static int *tgid_map;
1716
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001717#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001718#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001719static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001720struct saved_cmdlines_buffer {
1721 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1722 unsigned *map_cmdline_to_pid;
1723 unsigned cmdline_num;
1724 int cmdline_idx;
1725 char *saved_cmdlines;
1726};
1727static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001728
Steven Rostedt25b0b442008-05-12 21:21:00 +02001729/* temporary disable recording */
Joel Fernandesd914ba32017-06-26 19:01:55 -07001730static atomic_t trace_record_taskinfo_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001731
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001732static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001733{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001734 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1735}
1736
1737static inline void set_cmdline(int idx, const char *cmdline)
1738{
1739 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1740}
1741
1742static int allocate_cmdlines_buffer(unsigned int val,
1743 struct saved_cmdlines_buffer *s)
1744{
1745 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1746 GFP_KERNEL);
1747 if (!s->map_cmdline_to_pid)
1748 return -ENOMEM;
1749
1750 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1751 if (!s->saved_cmdlines) {
1752 kfree(s->map_cmdline_to_pid);
1753 return -ENOMEM;
1754 }
1755
1756 s->cmdline_idx = 0;
1757 s->cmdline_num = val;
1758 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1759 sizeof(s->map_pid_to_cmdline));
1760 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1761 val * sizeof(*s->map_cmdline_to_pid));
1762
1763 return 0;
1764}
1765
1766static int trace_create_savedcmd(void)
1767{
1768 int ret;
1769
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001770 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001771 if (!savedcmd)
1772 return -ENOMEM;
1773
1774 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1775 if (ret < 0) {
1776 kfree(savedcmd);
1777 savedcmd = NULL;
1778 return -ENOMEM;
1779 }
1780
1781 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001782}
1783
Carsten Emdeb5130b12009-09-13 01:43:07 +02001784int is_tracing_stopped(void)
1785{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001786 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001787}
1788
Steven Rostedt0f048702008-11-05 16:05:44 -05001789/**
1790 * tracing_start - quick start of the tracer
1791 *
1792 * If tracing is enabled but was stopped by tracing_stop,
1793 * this will start the tracer back up.
1794 */
1795void tracing_start(void)
1796{
1797 struct ring_buffer *buffer;
1798 unsigned long flags;
1799
1800 if (tracing_disabled)
1801 return;
1802
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001803 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1804 if (--global_trace.stop_count) {
1805 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001806 /* Someone screwed up their debugging */
1807 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001808 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001809 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001810 goto out;
1811 }
1812
Steven Rostedta2f80712010-03-12 19:56:00 -05001813 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001814 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001815
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001816 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001817 if (buffer)
1818 ring_buffer_record_enable(buffer);
1819
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001820#ifdef CONFIG_TRACER_MAX_TRACE
1821 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001822 if (buffer)
1823 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001824#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001825
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001826 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001827
Steven Rostedt0f048702008-11-05 16:05:44 -05001828 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001829 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1830}
1831
1832static void tracing_start_tr(struct trace_array *tr)
1833{
1834 struct ring_buffer *buffer;
1835 unsigned long flags;
1836
1837 if (tracing_disabled)
1838 return;
1839
1840 /* If global, we need to also start the max tracer */
1841 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1842 return tracing_start();
1843
1844 raw_spin_lock_irqsave(&tr->start_lock, flags);
1845
1846 if (--tr->stop_count) {
1847 if (tr->stop_count < 0) {
1848 /* Someone screwed up their debugging */
1849 WARN_ON_ONCE(1);
1850 tr->stop_count = 0;
1851 }
1852 goto out;
1853 }
1854
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001855 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001856 if (buffer)
1857 ring_buffer_record_enable(buffer);
1858
1859 out:
1860 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001861}
1862
1863/**
1864 * tracing_stop - quick stop of the tracer
1865 *
1866 * Light weight way to stop tracing. Use in conjunction with
1867 * tracing_start.
1868 */
1869void tracing_stop(void)
1870{
1871 struct ring_buffer *buffer;
1872 unsigned long flags;
1873
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001874 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1875 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001876 goto out;
1877
Steven Rostedta2f80712010-03-12 19:56:00 -05001878 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001879 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001880
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001881 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001882 if (buffer)
1883 ring_buffer_record_disable(buffer);
1884
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001885#ifdef CONFIG_TRACER_MAX_TRACE
1886 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001887 if (buffer)
1888 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001889#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001890
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001891 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001892
Steven Rostedt0f048702008-11-05 16:05:44 -05001893 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001894 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1895}
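
/*
 * Example (sketch): pausing the global buffers around an operation
 * whose events would only add noise ("do_noisy_work" is
 * hypothetical):
 *
 *	tracing_stop();
 *	do_noisy_work();
 *	tracing_start();
 *
 * The pair nests via stop_count, so this is safe even if tracing was
 * already stopped by someone else.
 */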
1896
1897static void tracing_stop_tr(struct trace_array *tr)
1898{
1899 struct ring_buffer *buffer;
1900 unsigned long flags;
1901
1902 /* If global, we need to also stop the max tracer */
1903 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1904 return tracing_stop();
1905
1906 raw_spin_lock_irqsave(&tr->start_lock, flags);
1907 if (tr->stop_count++)
1908 goto out;
1909
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001910 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001911 if (buffer)
1912 ring_buffer_record_disable(buffer);
1913
1914 out:
1915 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001916}
1917
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001918static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001919{
Carsten Emdea635cf02009-03-18 09:00:41 +01001920 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001921
Joel Fernandeseaf260a2017-07-06 16:00:21 -07001922 /* treat recording of idle task as a success */
1923 if (!tsk->pid)
1924 return 1;
1925
1926 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001927 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001928
1929 /*
1930 * It's not the end of the world if we don't get
1931 * the lock, but we also don't want to spin
1932 * nor do we want to disable interrupts,
1933 * so if we miss here, then better luck next time.
1934 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001935 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001936 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001937
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001938 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001939 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001940 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001941
Carsten Emdea635cf02009-03-18 09:00:41 +01001942 /*
1943 * Check whether the cmdline buffer at idx has a pid
1944 * mapped. We are going to overwrite that entry so we
1945 * need to clear the map_pid_to_cmdline. Otherwise we
1946 * would read the new comm for the old pid.
1947 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001948 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001949 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001950 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001951
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001952 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1953 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001954
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001955 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001956 }
1957
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001958 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001959
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001960 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001961
1962 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001963}
1964
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001965static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001966{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001967 unsigned map;
1968
Steven Rostedt4ca530852009-03-16 19:20:15 -04001969 if (!pid) {
1970 strcpy(comm, "<idle>");
1971 return;
1972 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001973
Steven Rostedt74bf4072010-01-25 15:11:53 -05001974 if (WARN_ON_ONCE(pid < 0)) {
1975 strcpy(comm, "<XXX>");
1976 return;
1977 }
1978
Steven Rostedt4ca530852009-03-16 19:20:15 -04001979 if (pid > PID_MAX_DEFAULT) {
1980 strcpy(comm, "<...>");
1981 return;
1982 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001983
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001984 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001985 if (map != NO_CMDLINE_MAP)
Amey Telawanee09e2862017-05-03 15:41:14 +05301986 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
Thomas Gleixner50d88752009-03-18 08:58:44 +01001987 else
1988 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001989}
1990
1991void trace_find_cmdline(int pid, char comm[])
1992{
1993 preempt_disable();
1994 arch_spin_lock(&trace_cmdline_lock);
1995
1996 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001997
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001998 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001999 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002000}
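
/*
 * Example (sketch): output code resolves a recorded pid back to a
 * comm when printing an entry, along the lines of:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *	trace_seq_printf(s, "%s-%d", comm, entry->pid);
 */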
2001
Joel Fernandesd914ba32017-06-26 19:01:55 -07002002int trace_find_tgid(int pid)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002003{
Joel Fernandesd914ba32017-06-26 19:01:55 -07002004 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2005 return 0;
2006
2007 return tgid_map[pid];
2008}
2009
2010static int trace_save_tgid(struct task_struct *tsk)
2011{
Joel Fernandesbd45d342017-07-06 16:00:22 -07002012 /* treat recording of idle task as a success */
2013 if (!tsk->pid)
2014 return 1;
2015
2016 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
Joel Fernandesd914ba32017-06-26 19:01:55 -07002017 return 0;
2018
2019 tgid_map[tsk->pid] = tsk->tgid;
2020 return 1;
2021}
2022
2023static bool tracing_record_taskinfo_skip(int flags)
2024{
2025 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2026 return true;
2027 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2028 return true;
2029 if (!__this_cpu_read(trace_taskinfo_save))
2030 return true;
2031 return false;
2032}
2033
2034/**
2035 * tracing_record_taskinfo - record the task info of a task
2036 *
2037 * @task: task to record
2038 * @flags: TRACE_RECORD_CMDLINE for recording comm
2039 *         TRACE_RECORD_TGID for recording tgid
2040 */
2041void tracing_record_taskinfo(struct task_struct *task, int flags)
2042{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002043 bool done;
2044
Joel Fernandesd914ba32017-06-26 19:01:55 -07002045 if (tracing_record_taskinfo_skip(flags))
2046 return;
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002047
2048 /*
2049 * Record as much task information as possible. If some fail, continue
2050 * to try to record the others.
2051 */
2052 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2053 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2054
2055 /* If recording any information failed, retry again soon. */
2056 if (!done)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002057 return;
2058
Joel Fernandesd914ba32017-06-26 19:01:55 -07002059 __this_cpu_write(trace_taskinfo_save, false);
2060}
2061
2062/**
2063 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2064 *
2065 * @prev: previous task during sched_switch
2066 * @next: next task during sched_switch
2067 * @flags: TRACE_RECORD_CMDLINE for recording comm
2068 * TRACE_RECORD_TGID for recording tgid
2069 */
2070void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2071 struct task_struct *next, int flags)
2072{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002073 bool done;
2074
Joel Fernandesd914ba32017-06-26 19:01:55 -07002075 if (tracing_record_taskinfo_skip(flags))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002076 return;
2077
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002078 /*
2079 * Record as much task information as possible. If some fail, continue
2080 * to try to record the others.
2081 */
2082 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2083 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2084 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2085 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
Joel Fernandesd914ba32017-06-26 19:01:55 -07002086
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002087 /* If recording any information failed, retry again soon. */
2088 if (!done)
Joel Fernandesd914ba32017-06-26 19:01:55 -07002089 return;
2090
2091 __this_cpu_write(trace_taskinfo_save, false);
2092}
2093
2094/* Helpers to record a specific task information */
2095void tracing_record_cmdline(struct task_struct *task)
2096{
2097 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2098}
2099
2100void tracing_record_tgid(struct task_struct *task)
2101{
2102 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002103}
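
/*
 * Example (sketch): a sched_switch probe can record comms and tgids
 * for both tasks in one call:
 *
 *	tracing_record_taskinfo_sched_switch(prev, next,
 *					     TRACE_RECORD_CMDLINE |
 *					     TRACE_RECORD_TGID);
 */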
2104
Steven Rostedt (VMware)af0009f2017-03-16 11:01:06 -04002105/*
2106 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2107 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2108 * simplifies those functions and keeps them in sync.
2109 */
2110enum print_line_t trace_handle_return(struct trace_seq *s)
2111{
2112 return trace_seq_has_overflowed(s) ?
2113 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2114}
2115EXPORT_SYMBOL_GPL(trace_handle_return);
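
/*
 * Example (sketch): a typical trace_event output callback ends with
 * this helper instead of checking the trace_seq state by hand
 * ("trace_foo_print" is hypothetical; see trace_output.c for real
 * ones):
 *
 *	static enum print_line_t
 *	trace_foo_print(struct trace_iterator *iter, int flags,
 *			struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "...");
 *		return trace_handle_return(&iter->seq);
 *	}
 */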
2116
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03002117void
Steven Rostedt38697052008-10-01 13:14:09 -04002118tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2119 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002120{
2121 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002122
Steven Rostedt777e2082008-09-29 23:02:42 -04002123 entry->preempt_count = pc & 0xff;
2124 entry->pid = (tsk) ? tsk->pid : 0;
2125 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04002126#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04002127 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04002128#else
2129 TRACE_FLAG_IRQS_NOSUPPORT |
2130#endif
Peter Zijlstra7e6867b2016-03-18 16:28:04 +01002131 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002132 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
Pavankumar Kondetic59f29c2016-12-09 21:50:17 +05302133 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02002134 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2135 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002136}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02002137EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
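
/*
 * Example (sketch): open-coded users fill the common header of a
 * freshly reserved event before their own fields, e.g. for a
 * function entry:
 *
 *	struct ftrace_entry *entry = ring_buffer_event_data(event);
 *
 *	tracing_generic_entry_update(&entry->ent, flags, pc);
 *	entry->ip = ip;
 *	entry->parent_ip = parent_ip;
 */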
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002138
Steven Rostedte77405a2009-09-02 14:17:06 -04002139struct ring_buffer_event *
2140trace_buffer_lock_reserve(struct ring_buffer *buffer,
2141 int type,
2142 unsigned long len,
2143 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002144{
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002145 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002146}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002147
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002148DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2149DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2150static int trace_buffered_event_ref;
2151
2152/**
2153 * trace_buffered_event_enable - enable buffering events
2154 *
2155 * When events are being filtered, it is quicker to use a temporary
2156 * buffer to write the event data into if there's a likely chance
2157 * that it will not be committed. Discarding from the ring buffer
2158 * is not as fast as committing, and is much slower than copying
2159 * the data of a commit.
2160 *
2161 * When an event is to be filtered, per-cpu buffers are allocated
2162 * to write the event data into. If the event is then filtered
2163 * and discarded, it is simply dropped; otherwise, the entire data
2164 * is committed in one shot.
2165 */
2166void trace_buffered_event_enable(void)
2167{
2168 struct ring_buffer_event *event;
2169 struct page *page;
2170 int cpu;
2171
2172 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2173
2174 if (trace_buffered_event_ref++)
2175 return;
2176
2177 for_each_tracing_cpu(cpu) {
2178 page = alloc_pages_node(cpu_to_node(cpu),
2179 GFP_KERNEL | __GFP_NORETRY, 0);
2180 if (!page)
2181 goto failed;
2182
2183 event = page_address(page);
2184 memset(event, 0, sizeof(*event));
2185
2186 per_cpu(trace_buffered_event, cpu) = event;
2187
2188 preempt_disable();
2189 if (cpu == smp_processor_id() &&
2190 this_cpu_read(trace_buffered_event) !=
2191 per_cpu(trace_buffered_event, cpu))
2192 WARN_ON_ONCE(1);
2193 preempt_enable();
2194 }
2195
2196 return;
2197 failed:
2198 trace_buffered_event_disable();
2199}
2200
2201static void enable_trace_buffered_event(void *data)
2202{
2203 /* Probably not needed, but do it anyway */
2204 smp_rmb();
2205 this_cpu_dec(trace_buffered_event_cnt);
2206}
2207
2208static void disable_trace_buffered_event(void *data)
2209{
2210 this_cpu_inc(trace_buffered_event_cnt);
2211}
2212
2213/**
2214 * trace_buffered_event_disable - disable buffering events
2215 *
2216 * When a filter is removed, it is faster to not use the buffered
2217 * events, and to commit directly into the ring buffer. Free up
2218 * the temp buffers when there are no more users. This requires
2219 * special synchronization with current events.
2220 */
2221void trace_buffered_event_disable(void)
2222{
2223 int cpu;
2224
2225 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2226
2227 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2228 return;
2229
2230 if (--trace_buffered_event_ref)
2231 return;
2232
2233 preempt_disable();
2234 /* For each CPU, set the buffer as used. */
2235 smp_call_function_many(tracing_buffer_mask,
2236 disable_trace_buffered_event, NULL, 1);
2237 preempt_enable();
2238
2239 /* Wait for all current users to finish */
2240 synchronize_sched();
2241
2242 for_each_tracing_cpu(cpu) {
2243 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2244 per_cpu(trace_buffered_event, cpu) = NULL;
2245 }
2246 /*
2247 * Make sure trace_buffered_event is NULL before clearing
2248 * trace_buffered_event_cnt.
2249 */
2250 smp_wmb();
2251
2252 preempt_disable();
2253 /* Do the work on each cpu */
2254 smp_call_function_many(tracing_buffer_mask,
2255 enable_trace_buffered_event, NULL, 1);
2256 preempt_enable();
2257}
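
/*
 * Example (sketch): the event filter code pairs these under
 * event_mutex, roughly:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();
 *	... install the filter ...
 *	mutex_unlock(&event_mutex);
 *
 * with a matching trace_buffered_event_disable(), again under
 * event_mutex, when the filter is removed.
 */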
2258
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002259static struct ring_buffer *temp_buffer;
2260
Steven Rostedtef5580d2009-02-27 19:38:04 -05002261struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04002262trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002263 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002264 int type, unsigned long len,
2265 unsigned long flags, int pc)
2266{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002267 struct ring_buffer_event *entry;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002268 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002269
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002270 *current_rb = trace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002271
2272 if ((trace_file->flags &
2273 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2274 (entry = this_cpu_read(trace_buffered_event))) {
2275 /* Try to use the per cpu buffer first */
2276 val = this_cpu_inc_return(trace_buffered_event_cnt);
2277 if (val == 1) {
2278 trace_event_setup(entry, type, flags, pc);
2279 entry->array[0] = len;
2280 return entry;
2281 }
2282 this_cpu_dec(trace_buffered_event_cnt);
2283 }
2284
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002285 entry = __trace_buffer_lock_reserve(*current_rb,
2286 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002287 /*
2288 * If tracing is off, but we have triggers enabled
2289 * we still need to look at the event data. Use the temp_buffer
2290	 * to store the trace event for the trigger to use. It's recursion
2291	 * safe and will not be recorded anywhere.
2292 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002293 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002294 *current_rb = temp_buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002295 entry = __trace_buffer_lock_reserve(*current_rb,
2296 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002297 }
2298 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002299}
2300EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
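
/*
 * Example (sketch): an open-coded event write path looks roughly
 * like the code generated for trace events ("struct foo_entry" and
 * "event_type" are hypothetical):
 *
 *	struct ring_buffer *buffer;
 *	struct ring_buffer_event *event;
 *	struct foo_entry *entry;
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 *						event_type, sizeof(*entry),
 *						flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->field = value;
 *	event_trigger_unlock_commit(trace_file, buffer, event, entry,
 *				    flags, pc);
 */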
2301
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002302static DEFINE_SPINLOCK(tracepoint_iter_lock);
2303static DEFINE_MUTEX(tracepoint_printk_mutex);
2304
2305static void output_printk(struct trace_event_buffer *fbuffer)
2306{
2307 struct trace_event_call *event_call;
2308 struct trace_event *event;
2309 unsigned long flags;
2310 struct trace_iterator *iter = tracepoint_print_iter;
2311
2312 /* We should never get here if iter is NULL */
2313 if (WARN_ON_ONCE(!iter))
2314 return;
2315
2316 event_call = fbuffer->trace_file->event_call;
2317 if (!event_call || !event_call->event.funcs ||
2318 !event_call->event.funcs->trace)
2319 return;
2320
2321 event = &fbuffer->trace_file->event_call->event;
2322
2323 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2324 trace_seq_init(&iter->seq);
2325 iter->ent = fbuffer->entry;
2326 event_call->event.funcs->trace(iter, 0, event);
2327 trace_seq_putc(&iter->seq, 0);
2328 printk("%s", iter->seq.buffer);
2329
2330 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2331}
2332
2333int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2334 void __user *buffer, size_t *lenp,
2335 loff_t *ppos)
2336{
2337 int save_tracepoint_printk;
2338 int ret;
2339
2340 mutex_lock(&tracepoint_printk_mutex);
2341 save_tracepoint_printk = tracepoint_printk;
2342
2343 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2344
2345 /*
2346 * This will force exiting early, as tracepoint_printk
2347	 * is always zero when tracepoint_print_iter is not allocated.
2348 */
2349 if (!tracepoint_print_iter)
2350 tracepoint_printk = 0;
2351
2352 if (save_tracepoint_printk == tracepoint_printk)
2353 goto out;
2354
2355 if (tracepoint_printk)
2356 static_key_enable(&tracepoint_printk_key.key);
2357 else
2358 static_key_disable(&tracepoint_printk_key.key);
2359
2360 out:
2361 mutex_unlock(&tracepoint_printk_mutex);
2362
2363 return ret;
2364}
2365
2366void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2367{
2368 if (static_key_false(&tracepoint_printk_key.key))
2369 output_printk(fbuffer);
2370
2371 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2372 fbuffer->event, fbuffer->entry,
2373 fbuffer->flags, fbuffer->pc);
2374}
2375EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2376
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002377void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2378 struct ring_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002379 struct ring_buffer_event *event,
2380 unsigned long flags, int pc,
2381 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002382{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002383 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002384
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002385 /*
2386 * If regs is not set, then skip the following callers:
2387 * trace_buffer_unlock_commit_regs
2388 * event_trigger_unlock_commit
2389 * trace_event_buffer_commit
2390 * trace_event_raw_event_sched_switch
2391 * Note, we can still get here via blktrace, wakeup tracer
2392 * and mmiotrace, but that's ok if they lose a function or
2393	 * two. They are not that meaningful.
2394 */
2395 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002396 ftrace_trace_userstack(buffer, flags, pc);
2397}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002398
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002399/*
2400 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2401 */
2402void
2403trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2404 struct ring_buffer_event *event)
2405{
2406 __buffer_unlock_commit(buffer, event);
2407}
2408
Chunyan Zhang478409d2016-11-21 15:57:18 +08002409static void
2410trace_process_export(struct trace_export *export,
2411 struct ring_buffer_event *event)
2412{
2413 struct trace_entry *entry;
2414 unsigned int size = 0;
2415
2416 entry = ring_buffer_event_data(event);
2417 size = ring_buffer_event_length(event);
2418 export->write(entry, size);
2419}
2420
2421static DEFINE_MUTEX(ftrace_export_lock);
2422
2423static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2424
2425static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2426
2427static inline void ftrace_exports_enable(void)
2428{
2429 static_branch_enable(&ftrace_exports_enabled);
2430}
2431
2432static inline void ftrace_exports_disable(void)
2433{
2434 static_branch_disable(&ftrace_exports_enabled);
2435}
2436
2437void ftrace_exports(struct ring_buffer_event *event)
2438{
2439 struct trace_export *export;
2440
2441 preempt_disable_notrace();
2442
2443 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2444 while (export) {
2445 trace_process_export(export, event);
2446 export = rcu_dereference_raw_notrace(export->next);
2447 }
2448
2449 preempt_enable_notrace();
2450}
2451
2452static inline void
2453add_trace_export(struct trace_export **list, struct trace_export *export)
2454{
2455 rcu_assign_pointer(export->next, *list);
2456 /*
2457	 * We are adding the export to the list, but another
2458	 * CPU might be walking that list. We need to make sure
2459	 * the export->next pointer is valid before another CPU sees
2460	 * the export pointer inserted into the list.
2461 */
2462 rcu_assign_pointer(*list, export);
2463}
2464
2465static inline int
2466rm_trace_export(struct trace_export **list, struct trace_export *export)
2467{
2468 struct trace_export **p;
2469
2470 for (p = list; *p != NULL; p = &(*p)->next)
2471 if (*p == export)
2472 break;
2473
2474 if (*p != export)
2475 return -1;
2476
2477 rcu_assign_pointer(*p, (*p)->next);
2478
2479 return 0;
2480}
2481
2482static inline void
2483add_ftrace_export(struct trace_export **list, struct trace_export *export)
2484{
2485 if (*list == NULL)
2486 ftrace_exports_enable();
2487
2488 add_trace_export(list, export);
2489}
2490
2491static inline int
2492rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2493{
2494 int ret;
2495
2496 ret = rm_trace_export(list, export);
2497 if (*list == NULL)
2498 ftrace_exports_disable();
2499
2500 return ret;
2501}
2502
2503int register_ftrace_export(struct trace_export *export)
2504{
2505 if (WARN_ON_ONCE(!export->write))
2506 return -1;
2507
2508 mutex_lock(&ftrace_export_lock);
2509
2510 add_ftrace_export(&ftrace_exports_list, export);
2511
2512 mutex_unlock(&ftrace_export_lock);
2513
2514 return 0;
2515}
2516EXPORT_SYMBOL_GPL(register_ftrace_export);
2517
2518int unregister_ftrace_export(struct trace_export *export)
2519{
2520 int ret;
2521
2522 mutex_lock(&ftrace_export_lock);
2523
2524 ret = rm_ftrace_export(&ftrace_exports_list, export);
2525
2526 mutex_unlock(&ftrace_export_lock);
2527
2528 return ret;
2529}
2530EXPORT_SYMBOL_GPL(unregister_ftrace_export);
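
/*
 * Example (sketch): mirroring function trace records to an external
 * sink. "my_write" and "my_export" are hypothetical, and
 * "push_to_sink" stands in for whatever transport is used; the
 * callback signature matches the call in trace_process_export()
 * above:
 *
 *	static void my_write(const void *entry, unsigned int size)
 *	{
 *		push_to_sink(entry, size);
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_write,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */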
2531
Ingo Molnare309b412008-05-12 21:20:51 +02002532void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002533trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04002534 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2535 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002536{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002537 struct trace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002538 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002539 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002540 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002541
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002542 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2543 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002544 if (!event)
2545 return;
2546 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002547 entry->ip = ip;
2548 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002549
Chunyan Zhang478409d2016-11-21 15:57:18 +08002550 if (!call_filter_check_discard(call, entry, buffer, event)) {
2551 if (static_branch_unlikely(&ftrace_exports_enabled))
2552 ftrace_exports(event);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002553 __buffer_unlock_commit(buffer, event);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002554 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002555}
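
/*
 * Example (sketch): the function tracer's ftrace callback funnels
 * each hit into trace_function(), roughly:
 *
 *	local_save_flags(flags);
 *	pc = preempt_count();
 *	trace_function(tr, ip, parent_ip, flags, pc);
 *
 * See trace_functions.c for the full callback with its recursion
 * guards.
 */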
2556
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002557#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002558
2559#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2560struct ftrace_stack {
2561 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2562};
2563
2564static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2565static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2566
Steven Rostedte77405a2009-09-02 14:17:06 -04002567static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05002568 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002569 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02002570{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002571 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002572 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002573 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02002574 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002575 int use_stack;
2576 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02002577
2578 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02002579 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02002580
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002581 /*
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002582	 * Add two, for this function and the call to save_stack_trace().
2583 * If regs is set, then these functions will not be in the way.
2584 */
2585 if (!regs)
2586 trace.skip += 2;
2587
2588 /*
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002589	 * Since events can happen in NMIs there's no safe way to
2590	 * use the per-cpu ftrace_stacks. We reserve it, and if an interrupt
2591	 * or NMI comes in, it will just have to use the default
2592	 * FTRACE_STACK_ENTRIES.
2593 */
2594 preempt_disable_notrace();
2595
Shan Wei82146522012-11-19 13:21:01 +08002596 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002597 /*
2598 * We don't need any atomic variables, just a barrier.
2599 * If an interrupt comes in, we don't care, because it would
2600 * have exited and put the counter back to what we want.
2601 * We just need a barrier to keep gcc from moving things
2602 * around.
2603 */
2604 barrier();
2605 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05002606 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002607 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2608
2609 if (regs)
2610 save_stack_trace_regs(regs, &trace);
2611 else
2612 save_stack_trace(&trace);
2613
2614 if (trace.nr_entries > size)
2615 size = trace.nr_entries;
2616 } else
2617 /* From now on, use_stack is a boolean */
2618 use_stack = 0;
2619
2620 size *= sizeof(unsigned long);
2621
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002622 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2623 sizeof(*entry) + size, flags, pc);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002624 if (!event)
2625 goto out;
2626 entry = ring_buffer_event_data(event);
2627
2628 memset(&entry->caller, 0, size);
2629
2630 if (use_stack)
2631 memcpy(&entry->caller, trace.entries,
2632 trace.nr_entries * sizeof(unsigned long));
2633 else {
2634 trace.max_entries = FTRACE_STACK_ENTRIES;
2635 trace.entries = entry->caller;
2636 if (regs)
2637 save_stack_trace_regs(regs, &trace);
2638 else
2639 save_stack_trace(&trace);
2640 }
2641
2642 entry->size = trace.nr_entries;
2643
Tom Zanussif306cc82013-10-24 08:34:17 -05002644 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002645 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002646
2647 out:
2648 /* Again, don't let gcc optimize things here */
2649 barrier();
Shan Wei82146522012-11-19 13:21:01 +08002650 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002651 preempt_enable_notrace();
2652
Ingo Molnarf0a920d2008-05-12 21:20:47 +02002653}
2654
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002655static inline void ftrace_trace_stack(struct trace_array *tr,
2656 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002657 unsigned long flags,
2658 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05002659{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002660 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05002661 return;
2662
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002663 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05002664}
2665
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002666void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2667 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04002668{
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04002669 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2670
2671 if (rcu_is_watching()) {
2672 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2673 return;
2674 }
2675
2676 /*
2677 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2678 * but if the above rcu_is_watching() failed, then the NMI
2679 * triggered someplace critical, and rcu_irq_enter() should
2680 * not be called from NMI.
2681 */
2682 if (unlikely(in_nmi()))
2683 return;
2684
2685 /*
2686 * It is possible that a function is being traced in a
2687 * location that RCU is not watching. A call to
2688 * rcu_irq_enter() will make sure that it is, but there's
2689 * a few internal rcu functions that could be traced
2690 * where that wont work either. In those cases, we just
2691 * do nothing.
2692 */
2693 if (unlikely(rcu_irq_enter_disabled()))
2694 return;
2695
2696 rcu_irq_enter_irqson();
2697 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2698 rcu_irq_exit_irqson();
Steven Rostedt38697052008-10-01 13:14:09 -04002699}
2700
Steven Rostedt03889382009-12-11 09:48:22 -05002701/**
2702 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002703 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05002704 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002705void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05002706{
2707 unsigned long flags;
2708
2709 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05002710 return;
Steven Rostedt03889382009-12-11 09:48:22 -05002711
2712 local_save_flags(flags);
2713
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002714 /*
2715	 * Skip 3 more; that seems to get us to the caller of
2716	 * this function.
2717 */
2718 skip += 3;
2719 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2720 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05002721}
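
/*
 * Example (sketch): recording the current call chain from a suspect
 * path ("bad_state" is hypothetical):
 *
 *	if (unlikely(bad_state))
 *		trace_dump_stack(0);
 *
 * A skip of 0 starts the trace at the caller of trace_dump_stack().
 */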
2722
Steven Rostedt91e86e52010-11-10 12:56:12 +01002723static DEFINE_PER_CPU(int, user_stack_count);
2724
Steven Rostedte77405a2009-09-02 14:17:06 -04002725void
2726ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02002727{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002728 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02002729 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02002730 struct userstack_entry *entry;
2731 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02002732
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002733 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02002734 return;
2735
Steven Rostedtb6345872010-03-12 20:03:30 -05002736 /*
2737	 * NMIs can not handle page faults, even with fixups.
2738	 * Saving the user stack can (and often does) fault.
2739 */
2740 if (unlikely(in_nmi()))
2741 return;
2742
Steven Rostedt91e86e52010-11-10 12:56:12 +01002743 /*
2744 * Prevent recursion, since the user stack tracing may
2745 * trigger other kernel events.
2746 */
2747 preempt_disable();
2748 if (__this_cpu_read(user_stack_count))
2749 goto out;
2750
2751 __this_cpu_inc(user_stack_count);
2752
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002753 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2754 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02002755 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08002756 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02002757 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02002758
Steven Rostedt48659d32009-09-11 11:36:23 -04002759 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02002760 memset(&entry->caller, 0, sizeof(entry->caller));
2761
2762 trace.nr_entries = 0;
2763 trace.max_entries = FTRACE_STACK_ENTRIES;
2764 trace.skip = 0;
2765 trace.entries = entry->caller;
2766
2767 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05002768 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002769 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002770
Li Zefan1dbd1952010-12-09 15:47:56 +08002771 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01002772 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002773 out:
2774 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02002775}
2776
Hannes Eder4fd27352009-02-10 19:44:12 +01002777#ifdef UNUSED
2778static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02002779{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002780 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02002781}
Hannes Eder4fd27352009-02-10 19:44:12 +01002782#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02002783
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002784#endif /* CONFIG_STACKTRACE */
2785
Steven Rostedt07d777f2011-09-22 14:01:55 -04002786/* created for use with alloc_percpu */
2787struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002788 int nesting;
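	/* One buffer per nesting context: normal, softirq, irq, NMI */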
2789 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04002790};
2791
2792static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002793
2794/*
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002795 * This allows for lockless recording. If we're nested too deeply, then
2796 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04002797 */
2798static char *get_trace_buf(void)
2799{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002800 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002801
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002802 if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002803 return NULL;
2804
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04002805 buffer->nesting++;
2806
2807 /* Interrupts must see nesting incremented before we use the buffer */
2808 barrier();
2809 return &buffer->buffer[buffer->nesting][0];
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002810}
2811
2812static void put_trace_buf(void)
2813{
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04002814 /* Don't let the decrement of nesting leak before this */
2815 barrier();
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002816 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002817}
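
/*
 * How the two helpers above pair up, as done by the vprintk helpers
 * below (a sketch; preemption must stay disabled in between):
 *
 *	preempt_disable_notrace();
 *	buf = get_trace_buf();
 *	if (buf) {
 *		... format the message into buf ...
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 */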
2818
2819static int alloc_percpu_trace_buffer(void)
2820{
2821 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002822
2823 buffers = alloc_percpu(struct trace_buffer_struct);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002824 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2825 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002826
2827 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002828 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002829}
2830
Steven Rostedt81698832012-10-11 10:15:05 -04002831static int buffers_allocated;
2832
Steven Rostedt07d777f2011-09-22 14:01:55 -04002833void trace_printk_init_buffers(void)
2834{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002835 if (buffers_allocated)
2836 return;
2837
2838 if (alloc_percpu_trace_buffer())
2839 return;
2840
Steven Rostedt2184db42014-05-28 13:14:40 -04002841 /* trace_printk() is for debug use only. Don't use it in production. */
2842
Joe Perchesa395d6a2016-03-22 14:28:09 -07002843 pr_warn("\n");
2844 pr_warn("**********************************************************\n");
2845 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2846 pr_warn("** **\n");
2847 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2848 pr_warn("** **\n");
2849 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2850 pr_warn("** unsafe for production use. **\n");
2851 pr_warn("** **\n");
2852 pr_warn("** If you see this message and you are not debugging **\n");
2853 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2854 pr_warn("** **\n");
2855 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2856 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002857
Steven Rostedtb382ede62012-10-10 21:44:34 -04002858 /* Expand the buffers to their configured size */
2859 tracing_update_buffers();
2860
Steven Rostedt07d777f2011-09-22 14:01:55 -04002861 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002862
2863 /*
2864 * trace_printk_init_buffers() can be called by modules.
2865 * If that happens, then we need to start cmdline recording
2866 * directly here. If the global_trace.buffer is already
2867 * allocated here, then this was called by module code.
2868 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002869 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002870 tracing_start_cmdline_record();
2871}
2872
2873void trace_printk_start_comm(void)
2874{
2875 /* Start tracing comms if trace printk is set */
2876 if (!buffers_allocated)
2877 return;
2878 tracing_start_cmdline_record();
2879}
2880
2881static void trace_printk_start_stop_comm(int enabled)
2882{
2883 if (!buffers_allocated)
2884 return;
2885
2886 if (enabled)
2887 tracing_start_cmdline_record();
2888 else
2889 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002890}
2891
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002892/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002893 * trace_vbprintk - write binary msg to tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002894 * @ip: The address of the caller
 * @fmt: The format string to record
 * @args: Arguments for @fmt, serialized in binary form
2895 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002896int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002897{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002898 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002899 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002900 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002901 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002902 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002903 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002904 char *tbuffer;
2905 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002906
2907 if (unlikely(tracing_selftest_running || tracing_disabled))
2908 return 0;
2909
2910 /* Don't pollute graph traces with trace_vprintk internals */
2911 pause_graph_tracing();
2912
2913 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002914 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002915
Steven Rostedt07d777f2011-09-22 14:01:55 -04002916 tbuffer = get_trace_buf();
2917 if (!tbuffer) {
2918 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002919 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002920 }
2921
2922 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2923
2924 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002925 goto out;
2926
Steven Rostedt07d777f2011-09-22 14:01:55 -04002927 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002928 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002929 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002930 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2931 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002932 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002933 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002934 entry = ring_buffer_event_data(event);
2935 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002936 entry->fmt = fmt;
2937
Steven Rostedt07d777f2011-09-22 14:01:55 -04002938 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002939 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002940 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002941 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05002942 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002943
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002944out:
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002945 put_trace_buf();
2946
2947out_nobuffer:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002948 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002949 unpause_graph_tracing();
2950
2951 return len;
2952}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002953EXPORT_SYMBOL_GPL(trace_vbprintk);
2954
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002955static int
2956__trace_array_vprintk(struct ring_buffer *buffer,
2957 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002958{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002959 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002960 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002961 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002962 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002963 unsigned long flags;
2964 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002965
2966 if (tracing_disabled || tracing_selftest_running)
2967 return 0;
2968
Steven Rostedt07d777f2011-09-22 14:01:55 -04002969 /* Don't pollute graph traces with trace_vprintk internals */
2970 pause_graph_tracing();
2971
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002972 pc = preempt_count();
2973 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002974
Steven Rostedt07d777f2011-09-22 14:01:55 -04002975
2976 tbuffer = get_trace_buf();
2977 if (!tbuffer) {
2978 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002979 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002980 }
2981
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002982 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002983
Steven Rostedt07d777f2011-09-22 14:01:55 -04002984 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002985 size = sizeof(*entry) + len + 1;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002986 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2987 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002988 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002989 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002990 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002991 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002992
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002993 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05002994 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002995 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002996 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05002997 }
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002998
2999out:
3000 put_trace_buf();
3001
3002out_nobuffer:
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003003 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003004 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003005
3006 return len;
3007}
Steven Rostedt659372d2009-09-03 19:11:07 -04003008
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003009int trace_array_vprintk(struct trace_array *tr,
3010 unsigned long ip, const char *fmt, va_list args)
3011{
3012 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3013}
3014
3015int trace_array_printk(struct trace_array *tr,
3016 unsigned long ip, const char *fmt, ...)
3017{
3018 int ret;
3019 va_list ap;
3020
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003021 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003022 return 0;
3023
3024 va_start(ap, fmt);
3025 ret = trace_array_vprintk(tr, ip, fmt, ap);
3026 va_end(ap);
3027 return ret;
3028}
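
/*
 * Example usage (a sketch, assuming @tr points at an existing trace
 * instance obtained elsewhere):
 *
 *	trace_array_printk(tr, _THIS_IP_, "reached state %d\n", state);
 */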
3029
3030int trace_array_printk_buf(struct ring_buffer *buffer,
3031 unsigned long ip, const char *fmt, ...)
3032{
3033 int ret;
3034 va_list ap;
3035
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003036 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003037 return 0;
3038
3039 va_start(ap, fmt);
3040 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3041 va_end(ap);
3042 return ret;
3043}
3044
Steven Rostedt659372d2009-09-03 19:11:07 -04003045int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3046{
Steven Rostedta813a152009-10-09 01:41:35 -04003047 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04003048}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003049EXPORT_SYMBOL_GPL(trace_vprintk);
3050
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003051static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04003052{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003053 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3054
Steven Rostedt5a90f572008-09-03 17:42:51 -04003055 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003056 if (buf_iter)
3057 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04003058}
3059
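/*
 * Peek at the next entry for @cpu without consuming it. The entry's
 * timestamp is stored in *ts and any lost-event count in *lost_events;
 * iter->ent_size is updated to the size of the peeked entry.
 */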
Ingo Molnare309b412008-05-12 21:20:51 +02003060static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003061peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3062 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003063{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003064 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003065 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003066
Steven Rostedtd7690412008-10-01 00:29:53 -04003067 if (buf_iter)
3068 event = ring_buffer_iter_peek(buf_iter, ts);
3069 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003070 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003071 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04003072
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003073 if (event) {
3074 iter->ent_size = ring_buffer_event_length(event);
3075 return ring_buffer_event_data(event);
3076 }
3077 iter->ent_size = 0;
3078 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003079}
Steven Rostedtd7690412008-10-01 00:29:53 -04003080
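/*
 * Scan the per-CPU buffers (or only iter->cpu_file, if set) and return
 * the pending entry with the smallest timestamp, so that output stays
 * globally time ordered. Does not advance the iterator.
 */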
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003081static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003082__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3083 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003084{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003085 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003086 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08003087 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003088 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003089 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003090 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003091 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003092 int cpu;
3093
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003094 /*
3095 * If we are in a per_cpu trace file, don't bother iterating over
3096 * all the CPUs; just peek at that one CPU directly.
3097 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003098 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003099 if (ring_buffer_empty_cpu(buffer, cpu_file))
3100 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003101 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003102 if (ent_cpu)
3103 *ent_cpu = cpu_file;
3104
3105 return ent;
3106 }
3107
Steven Rostedtab464282008-05-12 21:21:00 +02003108 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003109
3110 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003111 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003112
Steven Rostedtbc21b472010-03-31 19:49:26 -04003113 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003114
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02003115 /*
3116 * Pick the entry with the smallest timestamp:
3117 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003118 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003119 next = ent;
3120 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003121 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003122 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003123 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003124 }
3125 }
3126
Steven Rostedt12b5da32012-03-27 10:43:28 -04003127 iter->ent_size = next_size;
3128
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003129 if (ent_cpu)
3130 *ent_cpu = next_cpu;
3131
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003132 if (ent_ts)
3133 *ent_ts = next_ts;
3134
Steven Rostedtbc21b472010-03-31 19:49:26 -04003135 if (missing_events)
3136 *missing_events = next_lost;
3137
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003138 return next;
3139}
3140
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003141/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003142struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3143 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003144{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003145 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003146}
Ingo Molnar8c523a92008-05-12 21:20:46 +02003147
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003148/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05003149void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003150{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003151 iter->ent = __find_next_entry(iter, &iter->cpu,
3152 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003153
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003154 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003155 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003156
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003157 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003158}
3159
Ingo Molnare309b412008-05-12 21:20:51 +02003160static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003161{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003162 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003163 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003164}
3165
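/* seq_file .next op: advance the iterator to position *pos (never backwards). */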
Ingo Molnare309b412008-05-12 21:20:51 +02003166static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003167{
3168 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003169 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003170 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003171
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003172 WARN_ON_ONCE(iter->leftover);
3173
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003174 (*pos)++;
3175
3176 /* can't go backwards */
3177 if (iter->idx > i)
3178 return NULL;
3179
3180 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05003181 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003182 else
3183 ent = iter;
3184
3185 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05003186 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003187
3188 iter->pos = *pos;
3189
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003190 return ent;
3191}
3192
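/*
 * Rewind the ring buffer iterator for @cpu and skip over entries
 * stamped before the buffer's time_start: those predate the last
 * reset and are accounted in skipped_entries rather than shown.
 */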
Jason Wessel955b61e2010-08-05 09:22:23 -05003193void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003194{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003195 struct ring_buffer_event *event;
3196 struct ring_buffer_iter *buf_iter;
3197 unsigned long entries = 0;
3198 u64 ts;
3199
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003200 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003201
Steven Rostedt6d158a82012-06-27 20:46:14 -04003202 buf_iter = trace_buffer_iter(iter, cpu);
3203 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003204 return;
3205
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003206 ring_buffer_iter_reset(buf_iter);
3207
3208 /*
3209 * With the max latency tracers, it is possible that a reset
3210 * never took place on a cpu. This is evident by the
3211 * timestamp being before the start of the buffer.
3212 */
3213 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003214 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003215 break;
3216 entries++;
3217 ring_buffer_read(buf_iter, NULL);
3218 }
3219
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003220 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003221}
3222
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003223/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003224 * The current tracer is copied to avoid global locking
3225 * all around.
3226 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003227static void *s_start(struct seq_file *m, loff_t *pos)
3228{
3229 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003230 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003231 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003232 void *p = NULL;
3233 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003234 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003235
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09003236 /*
3237 * Copy the tracer to avoid using a global lock all around.
3238 * iter->trace is a copy of current_trace; the pointer to the
3239 * name may be compared instead of using strcmp(), as iter->trace->name
3240 * will point to the same string as current_trace->name.
3241 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003242 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003243 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3244 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003245 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003246
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003247#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003248 if (iter->snapshot && iter->trace->use_max_tr)
3249 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003250#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003251
3252 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003253 atomic_inc(&trace_record_taskinfo_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003254
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003255 if (*pos != iter->pos) {
3256 iter->ent = NULL;
3257 iter->cpu = 0;
3258 iter->idx = -1;
3259
Steven Rostedtae3b5092013-01-23 15:22:59 -05003260 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003261 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003262 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003263 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003264 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003265
Lai Jiangshanac91d852010-03-02 17:54:50 +08003266 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003267 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3268 ;
3269
3270 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003271 /*
3272 * If we overflowed the seq_file before, then we want
3273 * to just reuse the trace_seq buffer again.
3274 */
3275 if (iter->leftover)
3276 p = iter;
3277 else {
3278 l = *pos - 1;
3279 p = s_next(m, p, &l);
3280 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003281 }
3282
Lai Jiangshan4f535962009-05-18 19:35:34 +08003283 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003284 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003285 return p;
3286}
3287
3288static void s_stop(struct seq_file *m, void *p)
3289{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003290 struct trace_iterator *iter = m->private;
3291
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003292#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003293 if (iter->snapshot && iter->trace->use_max_tr)
3294 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003295#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003296
3297 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003298 atomic_dec(&trace_record_taskinfo_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003299
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003300 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08003301 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003302}
3303
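/*
 * Sum the per-CPU counts: *entries is what can still be read, while
 * *total additionally includes entries lost to ring buffer overruns.
 */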
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003304static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003305get_total_entries(struct trace_buffer *buf,
3306 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003307{
3308 unsigned long count;
3309 int cpu;
3310
3311 *total = 0;
3312 *entries = 0;
3313
3314 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003315 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003316 /*
3317 * If this buffer has skipped entries, then it holds all
3318 * entries for the trace and we need to ignore the
3319 * ones before the start time stamp.
3320 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003321 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3322 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003323 /* total is the same as the entries */
3324 *total += count;
3325 } else
3326 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003327 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003328 *entries += count;
3329 }
3330}
3331
Ingo Molnare309b412008-05-12 21:20:51 +02003332static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003333{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003334 seq_puts(m, "# _------=> CPU# \n"
3335 "# / _-----=> irqs-off \n"
3336 "# | / _----=> need-resched \n"
3337 "# || / _---=> hardirq/softirq \n"
3338 "# ||| / _--=> preempt-depth \n"
3339 "# |||| / delay \n"
3340 "# cmd pid ||||| time | caller \n"
3341 "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003342}
3343
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003344static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003345{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003346 unsigned long total;
3347 unsigned long entries;
3348
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003349 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003350 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3351 entries, total, num_online_cpus());
3352 seq_puts(m, "#\n");
3353}
3354
Joel Fernandes441dae82017-06-25 22:38:43 -07003355static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3356 unsigned int flags)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003357{
Joel Fernandes441dae82017-06-25 22:38:43 -07003358 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3359
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003360 print_event_info(buf, m);
Joel Fernandes441dae82017-06-25 22:38:43 -07003361
3362 seq_printf(m, "# TASK-PID CPU# %s TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3363 seq_printf(m, "# | | | %s | |\n", tgid ? " | " : "");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003364}
3365
Joel Fernandes441dae82017-06-25 22:38:43 -07003366static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3367 unsigned int flags)
Steven Rostedt77271ce2011-11-17 09:34:33 -05003368{
Joel Fernandes441dae82017-06-25 22:38:43 -07003369 bool tgid = flags & TRACE_ITER_RECORD_TGID;
Steven Rostedt (VMware)b11fb732017-07-11 15:43:24 -04003370 const char tgid_space[] = " ";
3371 const char space[] = " ";
Joel Fernandes441dae82017-06-25 22:38:43 -07003372
Steven Rostedt (VMware)b11fb732017-07-11 15:43:24 -04003373 seq_printf(m, "# %s _-----=> irqs-off\n",
3374 tgid ? tgid_space : space);
3375 seq_printf(m, "# %s / _----=> need-resched\n",
3376 tgid ? tgid_space : space);
3377 seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
3378 tgid ? tgid_space : space);
3379 seq_printf(m, "# %s|| / _--=> preempt-depth\n",
3380 tgid ? tgid_space : space);
3381 seq_printf(m, "# %s||| / delay\n",
3382 tgid ? tgid_space : space);
3383 seq_printf(m, "# TASK-PID CPU#%s|||| TIMESTAMP FUNCTION\n",
3384 tgid ? " TGID " : space);
3385 seq_printf(m, "# | | | %s|||| | |\n",
3386 tgid ? " | " : space);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003387}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003388
Jiri Olsa62b915f2010-04-02 19:01:22 +02003389void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003390print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3391{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003392 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003393 struct trace_buffer *buf = iter->trace_buffer;
3394 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003395 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003396 unsigned long entries;
3397 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003398 const char *name = "preemption";
3399
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05003400 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003401
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003402 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003403
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003404 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003405 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003406 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003407 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003408 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003409 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02003410 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003411 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02003412 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003413 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003414#if defined(CONFIG_PREEMPT_NONE)
3415 "server",
3416#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3417 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04003418#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003419 "preempt",
3420#else
3421 "unknown",
3422#endif
3423 /* These are reserved for later use */
3424 0, 0, 0, 0);
3425#ifdef CONFIG_SMP
3426 seq_printf(m, " #P:%d)\n", num_online_cpus());
3427#else
3428 seq_puts(m, ")\n");
3429#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003430 seq_puts(m, "# -----------------\n");
3431 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003432 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07003433 data->comm, data->pid,
3434 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003435 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003436 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003437
3438 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003439 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003440 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3441 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003442 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003443 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3444 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04003445 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003446 }
3447
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003448 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003449}
3450
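/*
 * Print a "##### CPU n buffer started ####" annotation the first time
 * output is drawn from a given CPU, making it obvious when a per-CPU
 * buffer starts mid-trace (only done when the buffers had overruns).
 */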
Steven Rostedta3097202008-11-07 22:36:02 -05003451static void test_cpu_buff_start(struct trace_iterator *iter)
3452{
3453 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003454 struct trace_array *tr = iter->tr;
Steven Rostedta3097202008-11-07 22:36:02 -05003455
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003456 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003457 return;
3458
3459 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3460 return;
3461
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003462 if (cpumask_available(iter->started) &&
3463 cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05003464 return;
3465
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003466 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003467 return;
3468
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003469 if (cpumask_available(iter->started))
Sasha Levin919cd972015-09-04 12:45:56 -04003470 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003471
3472 /* Don't print started cpu buffer for the first entry of the trace */
3473 if (iter->idx > 1)
3474 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3475 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05003476}
3477
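/* Default human-readable formatting of a single trace entry. */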
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003478static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003479{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003480 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02003481 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003482 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003483 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003484 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003485
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003486 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003487
Steven Rostedta3097202008-11-07 22:36:02 -05003488 test_cpu_buff_start(iter);
3489
Steven Rostedtf633cef2008-12-23 23:24:13 -05003490 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003491
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003492 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003493 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3494 trace_print_lat_context(iter);
3495 else
3496 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003497 }
3498
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003499 if (trace_seq_has_overflowed(s))
3500 return TRACE_TYPE_PARTIAL_LINE;
3501
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003502 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003503 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003504
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003505 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003506
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003507 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003508}
3509
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003510static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003511{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003512 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003513 struct trace_seq *s = &iter->seq;
3514 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003515 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003516
3517 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003518
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003519 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003520 trace_seq_printf(s, "%d %d %llu ",
3521 entry->pid, iter->cpu, iter->ts);
3522
3523 if (trace_seq_has_overflowed(s))
3524 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003525
Steven Rostedtf633cef2008-12-23 23:24:13 -05003526 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003527 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003528 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003529
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003530 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003531
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003532 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003533}
3534
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003535static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003536{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003537 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003538 struct trace_seq *s = &iter->seq;
3539 unsigned char newline = '\n';
3540 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003541 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003542
3543 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003544
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003545 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003546 SEQ_PUT_HEX_FIELD(s, entry->pid);
3547 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3548 SEQ_PUT_HEX_FIELD(s, iter->ts);
3549 if (trace_seq_has_overflowed(s))
3550 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003551 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003552
Steven Rostedtf633cef2008-12-23 23:24:13 -05003553 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003554 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04003555 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003556 if (ret != TRACE_TYPE_HANDLED)
3557 return ret;
3558 }
Steven Rostedt7104f302008-10-01 10:52:51 -04003559
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003560 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003561
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003562 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003563}
3564
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003565static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003566{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003567 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003568 struct trace_seq *s = &iter->seq;
3569 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003570 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003571
3572 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003573
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003574 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003575 SEQ_PUT_FIELD(s, entry->pid);
3576 SEQ_PUT_FIELD(s, iter->cpu);
3577 SEQ_PUT_FIELD(s, iter->ts);
3578 if (trace_seq_has_overflowed(s))
3579 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003580 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003581
Steven Rostedtf633cef2008-12-23 23:24:13 -05003582 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04003583 return event ? event->funcs->binary(iter, 0, event) :
3584 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003585}
3586
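/* Return 1 if the buffer(s) this iterator covers have nothing left to read. */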
Jiri Olsa62b915f2010-04-02 19:01:22 +02003587int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003588{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003589 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003590 int cpu;
3591
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003592 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003593 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003594 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003595 buf_iter = trace_buffer_iter(iter, cpu);
3596 if (buf_iter) {
3597 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003598 return 0;
3599 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003600 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003601 return 0;
3602 }
3603 return 1;
3604 }
3605
Steven Rostedtab464282008-05-12 21:21:00 +02003606 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04003607 buf_iter = trace_buffer_iter(iter, cpu);
3608 if (buf_iter) {
3609 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04003610 return 0;
3611 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003612 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04003613 return 0;
3614 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003615 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003616
Frederic Weisbecker797d3712008-09-30 18:13:45 +02003617 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003618}
3619
Lai Jiangshan4f535962009-05-18 19:35:34 +08003620/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05003621enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003622{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003623 struct trace_array *tr = iter->tr;
3624 unsigned long trace_flags = tr->trace_flags;
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003625 enum print_line_t ret;
3626
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003627 if (iter->lost_events) {
3628 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3629 iter->cpu, iter->lost_events);
3630 if (trace_seq_has_overflowed(&iter->seq))
3631 return TRACE_TYPE_PARTIAL_LINE;
3632 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04003633
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003634 if (iter->trace && iter->trace->print_line) {
3635 ret = iter->trace->print_line(iter);
3636 if (ret != TRACE_TYPE_UNHANDLED)
3637 return ret;
3638 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02003639
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05003640 if (iter->ent->type == TRACE_BPUTS &&
3641 trace_flags & TRACE_ITER_PRINTK &&
3642 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3643 return trace_print_bputs_msg_only(iter);
3644
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003645 if (iter->ent->type == TRACE_BPRINT &&
3646 trace_flags & TRACE_ITER_PRINTK &&
3647 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003648 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003649
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003650 if (iter->ent->type == TRACE_PRINT &&
3651 trace_flags & TRACE_ITER_PRINTK &&
3652 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003653 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003654
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003655 if (trace_flags & TRACE_ITER_BIN)
3656 return print_bin_fmt(iter);
3657
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003658 if (trace_flags & TRACE_ITER_HEX)
3659 return print_hex_fmt(iter);
3660
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003661 if (trace_flags & TRACE_ITER_RAW)
3662 return print_raw_fmt(iter);
3663
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003664 return print_trace_fmt(iter);
3665}
3666
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003667void trace_latency_header(struct seq_file *m)
3668{
3669 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003670 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003671
3672 /* print nothing if the buffers are empty */
3673 if (trace_empty(iter))
3674 return;
3675
3676 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3677 print_trace_header(m, iter);
3678
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003679 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003680 print_lat_help_header(m);
3681}
3682
Jiri Olsa62b915f2010-04-02 19:01:22 +02003683void trace_default_header(struct seq_file *m)
3684{
3685 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003686 struct trace_array *tr = iter->tr;
3687 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02003688
Jiri Olsaf56e7f82011-06-03 16:58:49 +02003689 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3690 return;
3691
Jiri Olsa62b915f2010-04-02 19:01:22 +02003692 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3693 /* print nothing if the buffers are empty */
3694 if (trace_empty(iter))
3695 return;
3696 print_trace_header(m, iter);
3697 if (!(trace_flags & TRACE_ITER_VERBOSE))
3698 print_lat_help_header(m);
3699 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05003700 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3701 if (trace_flags & TRACE_ITER_IRQ_INFO)
Joel Fernandes441dae82017-06-25 22:38:43 -07003702 print_func_help_header_irq(iter->trace_buffer,
3703 m, trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003704 else
Joel Fernandes441dae82017-06-25 22:38:43 -07003705 print_func_help_header(iter->trace_buffer, m,
3706 trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003707 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02003708 }
3709}
3710
Steven Rostedte0a413f2011-09-29 21:26:16 -04003711static void test_ftrace_alive(struct seq_file *m)
3712{
3713 if (!ftrace_is_dead())
3714 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003715 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3716 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04003717}
3718
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003719#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003720static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003721{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003722 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3723 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3724 "# Takes a snapshot of the main buffer.\n"
3725 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3726 "# (Doesn't have to be '2' works with any number that\n"
3727 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003728}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003729
3730static void show_snapshot_percpu_help(struct seq_file *m)
3731{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003732 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003733#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003734 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3735 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003736#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003737 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3738 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003739#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003740 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3741 "# (Doesn't have to be '2' works with any number that\n"
3742 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003743}
3744
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003745static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3746{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05003747 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003748 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003749 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003750 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003751
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003752 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003753 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3754 show_snapshot_main_help(m);
3755 else
3756 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003757}
3758#else
3759/* Should never be called */
3760static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3761#endif
3762
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003763static int s_show(struct seq_file *m, void *v)
3764{
3765 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003766 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003767
3768 if (iter->ent == NULL) {
3769 if (iter->tr) {
3770 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3771 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04003772 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003773 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003774 if (iter->snapshot && trace_empty(iter))
3775 print_snapshot_help(m, iter);
3776 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003777 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02003778 else
3779 trace_default_header(m);
3780
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003781 } else if (iter->leftover) {
3782 /*
3783 * If we filled the seq_file buffer earlier, we
3784 * want to just show it now.
3785 */
3786 ret = trace_print_seq(m, &iter->seq);
3787
3788 /* ret should this time be zero, but you never know */
3789 iter->leftover = ret;
3790
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003791 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003792 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003793 ret = trace_print_seq(m, &iter->seq);
3794 /*
3795 * If we overflow the seq_file buffer, then it will
3796 * ask us for this data again at start up.
3797 * Use that instead.
3798 * ret is 0 if seq_file write succeeded.
3799 * -1 otherwise.
3800 */
3801 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003802 }
3803
3804 return 0;
3805}
3806
Oleg Nesterov649e9c702013-07-23 17:25:54 +02003807/*
3808 * Should be used after trace_array_get(); trace_types_lock
3809 * ensures that i_cdev was already initialized.
3810 */
3811static inline int tracing_get_cpu(struct inode *inode)
3812{
3813 if (inode->i_cdev) /* See trace_create_cpu_file() */
3814 return (long)inode->i_cdev - 1;
3815 return RING_BUFFER_ALL_CPUS;
3816}

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}
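
/*
 * Design note on the ALL_CPUS branch above: every CPU's iterator is
 * first created with ring_buffer_read_prepare(), then a single
 * ring_buffer_read_prepare_sync() is issued before any of them is
 * started. Splitting prepare from start this way pays the (presumably
 * expensive) synchronization once for the whole set of CPUs instead
 * of once per CPU.
 */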

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return (tracing_disabled) ? true : false;
}

/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}

static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}

static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);
		struct trace_buffer *trace_buf = &tr->trace_buffer;

#ifdef CONFIG_TRACER_MAX_TRACE
		if (tr->current_trace->print_max)
			trace_buf = &tr->max_buffer;
#endif

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(trace_buf);
		else
			tracing_reset(trace_buf, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_puts(m, t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
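
/*
 * show_traces_seq_ops backs the "available_tracers" file: t_show()
 * emits each usable tracer name separated by spaces, with a newline
 * after the last one, e.g. (output illustrative):
 *
 *   # cat available_tracers
 *   function_graph function nop
 */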

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	char *mask_str;
	int len;

	len = snprintf(NULL, 0, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
	mask_str = kmalloc(len, GFP_KERNEL);
	if (!mask_str)
		return -ENOMEM;

	len = snprintf(mask_str, len, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	if (len >= count) {
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);

out_err:
	kfree(mask_str);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};
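
/*
 * Usage sketch for the tracing_cpumask file (path assumes the usual
 * tracefs mount point): cpumask_parse_user() expects a hex CPU mask,
 * so
 *
 *   # echo 3 > /sys/kernel/tracing/tracing_cpumask
 *
 * limits tracing to CPUs 0 and 1; every CPU that just left the mask
 * has its "disabled" count raised and its ring-buffer recording
 * stopped by the loop above.
 */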

static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (tr->trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tracer_flags->trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		tr->trace_flags |= mask;
	else
		tr->trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_RECORD_TGID) {
		if (!tgid_map)
			tgid_map = kzalloc((PID_MAX_DEFAULT + 1) * sizeof(*tgid_map),
					   GFP_KERNEL);
		if (!tgid_map) {
			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
			return -ENOMEM;
		}

		trace_event_enable_tgid_record(enabled);
	}

	if (mask == TRACE_ITER_EVENT_FORK)
		trace_event_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_FUNC_FORK)
		ftrace_pid_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK) {
		trace_printk_start_stop_comm(enabled);
		trace_printk_control(enabled);
	}

	return 0;
}

static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;
	size_t orig_len = strlen(option);

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	/*
	 * If the first trailing whitespace is replaced with '\0' by strstrip,
	 * turn it back into a space.
	 */
	if (orig_len > strlen(option))
		option[strlen(option)] = ' ';

	return ret;
}
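
/*
 * Example walk-through of trace_set_options() (illustrative): writing
 * "nooverwrite" strips the "no" prefix, leaving cmp = "overwrite" and
 * neg = 1; set_tracer_flag() then clears TRACE_ITER_OVERWRITE and
 * propagates the change to the ring buffer(s) through
 * ring_buffer_change_overwrite(). Any option not found in the core
 * trace_options[] list is handed to the current tracer via
 * set_tracer_option().
 */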

static void __init apply_trace_boot_options(void)
{
	char *buf = trace_boot_options_buf;
	char *option;

	while (true) {
		option = strsep(&buf, ",");

		if (!option)
			break;

		if (*option)
			trace_set_options(&global_trace, option);

		/* Put back the comma to allow this to be called again */
		if (buf)
			*(buf - 1) = ',';
	}
}
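
/*
 * Boot-time sketch (assuming the "trace_options=" kernel command-line
 * parameter is what fills trace_boot_options_buf): a setting such as
 *
 *   trace_options=sym-offset,nooverwrite
 *
 * is split on commas here and each token is applied to global_trace
 * exactly as if it had been written to the trace_options file.
 */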

static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};
static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	" trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t To clear the buffer write into this file: echo > trace\n"
	" trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	" current_tracer\t- function and latency tracers\n"
	" available_tracers\t- list of configured tracers for current_tracer\n"
	" buffer_size_kb\t- view and modify size of per cpu buffer\n"
	" buffer_total_size_kb - view total size of all cpu buffers\n\n"
	" trace_clock\t\t- change the clock used to order events\n"
	" local: Per cpu clock but may not be synced across CPUs\n"
	" global: Synced across CPUs but slows tracing down.\n"
	" counter: Not a clock, but just an increment\n"
	" uptime: Jiffy counter from time of boot\n"
	" perf: Same clock that perf events use\n"
#ifdef CONFIG_X86_64
	" x86-tsc: TSC cycle counter\n"
#endif
	"\n trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
	"\n trace_marker_raw\t\t- Writes into this file are written as binary data into the kernel buffer\n"
	" tracing_cpumask\t- Limit which CPUs to trace\n"
	" instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t Remove sub-buffer with rmdir\n"
	" trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t Disable an option by adding a suffix 'no' to the\n"
	"\t\t\t option name\n"
	" saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n available_filter_functions - list of functions that can be filtered on\n"
	" set_ftrace_filter\t- echo function name in here to only trace these\n"
	"\t\t\t functions\n"
	"\t accepts: func_full_name or glob-matching-pattern\n"
	"\t modules: Can select a group via module\n"
	"\t Format: :mod:<module-name>\n"
	"\t example: echo :mod:ext3 > set_ftrace_filter\n"
	"\t triggers: a command to perform when function is hit\n"
	"\t Format: <function>:<trigger>[:count]\n"
	"\t trigger: traceon, traceoff\n"
	"\t\t enable_event:<system>:<event>\n"
	"\t\t disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t snapshot\n"
#endif
	"\t\t dump\n"
	"\t\t cpudump\n"
	"\t example: echo do_fault:traceoff > set_ftrace_filter\n"
	"\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"\t The first one will disable tracing every time do_fault is hit\n"
	"\t The second will disable tracing at most 3 times when do_trap is hit\n"
	"\t The first time do_trap is hit and it disables tracing, the\n"
	"\t counter will decrement to 2. If tracing is already disabled,\n"
	"\t the counter will not decrement. It only decrements when the\n"
	"\t trigger did work\n"
	"\t To remove trigger without count:\n"
	"\t echo '!<function>:<trigger> > set_ftrace_filter\n"
	"\t To remove trigger with a count:\n"
	"\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
	" set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t modules: Can select a group via module command :mod:\n"
	"\t Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	" set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
	"\t\t (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	" set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	" set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
	" max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
	"\t\t\t snapshot buffer. Read the contents for more\n"
	"\t\t\t information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	" stack_trace\t\t- Shows the max stack trace when active\n"
	" stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t Write into this file to reset the max size (trigger a\n"
	"\t\t\t new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	" stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
	"\t\t\t traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
#ifdef CONFIG_KPROBE_EVENTS
	" kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
	"\t\t\t Write into this file to define/undefine new trace events.\n"
#endif
#ifdef CONFIG_UPROBE_EVENTS
	" uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
	"\t\t\t Write into this file to define/undefine new trace events.\n"
#endif
#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
	"\t accepts: event-definitions (one definition per line)\n"
	"\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
	"\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
	"\t -:[<group>/]<event>\n"
#ifdef CONFIG_KPROBE_EVENTS
	"\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
	"\t place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
#endif
#ifdef CONFIG_UPROBE_EVENTS
	"\t place: <path>:<offset>\n"
#endif
	"\t args: <name>=fetcharg[:type]\n"
	"\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
	"\t $stack<index>, $stack, $retval, $comm\n"
	"\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
	"\t b<bit-width>@<bit-offset>/<container-size>\n"
#endif
	" events/\t\t- Directory containing all trace event subsystems:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	" events/<system>/\t- Directory containing all trace events for <system>:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"\t\t\t events\n"
	" filter\t\t- If set, only events passing filter are traced\n"
	" events/<system>/<event>/\t- Directory containing control files for\n"
	"\t\t\t <event>:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	" filter\t\t- If set, only events passing filter are traced\n"
	" trigger\t\t- If set, a command to perform when event is hit\n"
	"\t Format: <trigger>[:count][if <filter>]\n"
	"\t trigger: traceon, traceoff\n"
	"\t enable_event:<system>:<event>\n"
	"\t disable_event:<system>:<event>\n"
#ifdef CONFIG_HIST_TRIGGERS
	"\t enable_hist:<system>:<event>\n"
	"\t disable_hist:<system>:<event>\n"
#endif
#ifdef CONFIG_STACKTRACE
	"\t\t stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t snapshot\n"
#endif
#ifdef CONFIG_HIST_TRIGGERS
	"\t\t hist (see below)\n"
#endif
	"\t example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t events/block/block_unplug/trigger\n"
	"\t The first disables tracing every time block_unplug is hit.\n"
	"\t The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
	"\t Like function triggers, the counter is only decremented if it\n"
	"\t enabled or disabled tracing.\n"
	"\t To remove a trigger without a count:\n"
	"\t echo '!<trigger> > <system>/<event>/trigger\n"
	"\t To remove a trigger with a count:\n"
	"\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
	"\t Filters can be ignored when removing a trigger.\n"
#ifdef CONFIG_HIST_TRIGGERS
	" hist trigger\t- If set, event hits are aggregated into a hash table\n"
	"\t Format: hist:keys=<field1[,field2,...]>\n"
	"\t [:values=<field1[,field2,...]>]\n"
	"\t [:sort=<field1[,field2,...]>]\n"
	"\t [:size=#entries]\n"
	"\t [:pause][:continue][:clear]\n"
	"\t [:name=histname1]\n"
	"\t [if <filter>]\n\n"
	"\t When a matching event is hit, an entry is added to a hash\n"
	"\t table using the key(s) and value(s) named, and the value of a\n"
	"\t sum called 'hitcount' is incremented. Keys and values\n"
	"\t correspond to fields in the event's format description. Keys\n"
	"\t can be any field, or the special string 'stacktrace'.\n"
	"\t Compound keys consisting of up to two fields can be specified\n"
	"\t by the 'keys' keyword. Values must correspond to numeric\n"
	"\t fields. Sort keys consisting of up to two fields can be\n"
	"\t specified using the 'sort' keyword. The sort direction can\n"
	"\t be modified by appending '.descending' or '.ascending' to a\n"
	"\t sort field. The 'size' parameter can be used to specify more\n"
	"\t or fewer than the default 2048 entries for the hashtable size.\n"
	"\t If a hist trigger is given a name using the 'name' parameter,\n"
	"\t its histogram data will be shared with other triggers of the\n"
	"\t same name, and trigger hits will update this common data.\n\n"
	"\t Reading the 'hist' file for the event will dump the hash\n"
	"\t table in its entirety to stdout. If there are multiple hist\n"
	"\t triggers attached to an event, there will be a table for each\n"
	"\t trigger in the output. The table displayed for a named\n"
	"\t trigger will be the same as any other instance having the\n"
	"\t same name. The default format used to display a given field\n"
	"\t can be modified by appending any of the following modifiers\n"
	"\t to the field name, as applicable:\n\n"
	"\t .hex display a number as a hex value\n"
	"\t .sym display an address as a symbol\n"
	"\t .sym-offset display an address as a symbol and offset\n"
	"\t .execname display a common_pid as a program name\n"
	"\t .syscall display a syscall id as a syscall name\n"
	"\t .log2 display log2 value rather than raw number\n\n"
	"\t The 'pause' parameter can be used to pause an existing hist\n"
	"\t trigger or to start a hist trigger but not log any events\n"
	"\t until told to do so. 'continue' can be used to start or\n"
	"\t restart a paused hist trigger.\n\n"
	"\t The 'clear' parameter will clear the contents of a running\n"
	"\t hist trigger and leave its current paused/active state\n"
	"\t unchanged.\n\n"
	"\t The enable_hist and disable_hist triggers can be used to\n"
	"\t have one event conditionally start and stop another event's\n"
	"\t already-attached hist trigger. The syntax is analogous to\n"
	"\t the enable_event and disable_event triggers.\n"
#endif
;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};

static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
{
	int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
		if (trace_find_tgid(*ptr))
			return ptr;
	}

	return NULL;
}

static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	if (!tgid_map)
		return NULL;

	v = &tgid_map[0];
	while (l <= *pos) {
		v = saved_tgids_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_tgids_stop(struct seq_file *m, void *v)
{
}

static int saved_tgids_show(struct seq_file *m, void *v)
{
	int pid = (int *)v - tgid_map;

	seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
	return 0;
}

static const struct seq_operations tracing_saved_tgids_seq_ops = {
	.start		= saved_tgids_start,
	.stop		= saved_tgids_stop,
	.next		= saved_tgids_next,
	.show		= saved_tgids_show,
};

static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_tgids_seq_ops);
}

static const struct file_operations tracing_saved_tgids_fops = {
	.open		= tracing_saved_tgids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
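
/*
 * Reading the saved_tgids file walks tgid_map and prints one
 * "<pid> <tgid>" pair per recorded entry, e.g. (output illustrative):
 *
 *   # cat saved_tgids
 *   1234 1230
 *   1235 1230
 */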

static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}

static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
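
/*
 * The saved_cmdlines file built on these ops prints one "<pid> <comm>"
 * pair per cached entry, resolved through __trace_find_cmdline() while
 * trace_cmdline_lock is held, e.g. (output illustrative):
 *
 *   # cat saved_cmdlines
 *   1234 bash
 */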

static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}
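
/*
 * Design note on the resize above: a full replacement buffer is
 * allocated up front, the global "savedcmd" pointer is swapped while
 * trace_cmdline_lock is held, and only the old buffer is freed after
 * the lock is dropped. Readers therefore always see either the old or
 * the new mapping in full, never a half-resized one.
 */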
4917
4918static ssize_t
4919tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4920 size_t cnt, loff_t *ppos)
4921{
4922 unsigned long val;
4923 int ret;
4924
4925 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4926 if (ret)
4927 return ret;
4928
4929 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
4930 if (!val || val > PID_MAX_DEFAULT)
4931 return -EINVAL;
4932
4933 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4934 if (ret < 0)
4935 return ret;
4936
4937 *ppos += cnt;
4938
4939 return cnt;
4940}
4941
4942static const struct file_operations tracing_saved_cmdlines_size_fops = {
4943 .open = tracing_open_generic,
4944 .read = tracing_saved_cmdlines_size_read,
4945 .write = tracing_saved_cmdlines_size_write,
4946};
4947
Jeremy Linton681bec02017-05-31 16:56:53 -05004948#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05004949static union trace_eval_map_item *
Jeremy Lintonf57a4142017-05-31 16:56:48 -05004950update_eval_map(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004951{
Jeremy Linton00f4b652017-05-31 16:56:43 -05004952 if (!ptr->map.eval_string) {
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004953 if (ptr->tail.next) {
4954 ptr = ptr->tail.next;
4955 /* Set ptr to the next real item (skip head) */
4956 ptr++;
4957 } else
4958 return NULL;
4959 }
4960 return ptr;
4961}
4962
Jeremy Lintonf57a4142017-05-31 16:56:48 -05004963static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004964{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05004965 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004966
4967 /*
4968 * Paranoid! If ptr points to end, we don't want to increment past it.
4969 * This really should never happen.
4970 */
Jeremy Lintonf57a4142017-05-31 16:56:48 -05004971 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004972 if (WARN_ON_ONCE(!ptr))
4973 return NULL;
4974
4975 ptr++;
4976
4977 (*pos)++;
4978
Jeremy Lintonf57a4142017-05-31 16:56:48 -05004979 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004980
4981 return ptr;
4982}
4983
Jeremy Lintonf57a4142017-05-31 16:56:48 -05004984static void *eval_map_start(struct seq_file *m, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004985{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05004986 union trace_eval_map_item *v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004987 loff_t l = 0;
4988
Jeremy Linton1793ed92017-05-31 16:56:46 -05004989 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004990
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05004991 v = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004992 if (v)
4993 v++;
4994
4995 while (v && l < *pos) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05004996 v = eval_map_next(m, v, &l);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004997 }
4998
4999 return v;
5000}
5001
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005002static void eval_map_stop(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005003{
Jeremy Linton1793ed92017-05-31 16:56:46 -05005004 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005005}
5006
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005007static int eval_map_show(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005008{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005009 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005010
5011 seq_printf(m, "%s %ld (%s)\n",
Jeremy Linton00f4b652017-05-31 16:56:43 -05005012 ptr->map.eval_string, ptr->map.eval_value,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005013 ptr->map.system);
5014
5015 return 0;
5016}
5017
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005018static const struct seq_operations tracing_eval_map_seq_ops = {
5019 .start = eval_map_start,
5020 .next = eval_map_next,
5021 .stop = eval_map_stop,
5022 .show = eval_map_show,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005023};
5024
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005025static int tracing_eval_map_open(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005026{
5027 if (tracing_disabled)
5028 return -ENODEV;
5029
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005030 return seq_open(filp, &tracing_eval_map_seq_ops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005031}
5032
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005033static const struct file_operations tracing_eval_map_fops = {
5034 .open = tracing_eval_map_open,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005035 .read = seq_read,
5036 .llseek = seq_lseek,
5037 .release = seq_release,
5038};
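/*
 * Sketch of what the "eval_map" file looks like (assumes
 * CONFIG_TRACE_EVAL_MAP_FILE and tracefs mounted at /sys/kernel/tracing).
 * Each line comes from eval_map_show() above, formatted as
 * "<eval string> <value> (<system>)":
 *
 *   cat eval_map
 *   HI_SOFTIRQ 0 (irq)
 *   TIMER_SOFTIRQ 1 (irq)
 *
 * The sample rows are illustrative, not guaranteed output.
 */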
5039
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005040static inline union trace_eval_map_item *
Jeremy Linton5f60b352017-05-31 16:56:47 -05005041trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005042{
5043 /* Return tail of array given the head */
5044 return ptr + ptr->head.length + 1;
5045}
5046
5047static void
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005048trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005049 int len)
5050{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005051 struct trace_eval_map **stop;
5052 struct trace_eval_map **map;
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005053 union trace_eval_map_item *map_array;
5054 union trace_eval_map_item *ptr;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005055
5056 stop = start + len;
5057
	/*
	 * The trace_eval_maps list contains the maps plus a head and a
	 * tail item, where the head holds the module and the length of
	 * the array, and the tail holds a pointer to the next list.
	 */
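	/*
	 * Resulting layout of map_array (a sketch derived from the code
	 * below):
	 *
	 *   [ head | map 0 | map 1 | ... | map len-1 | tail ]
	 *
	 * head.mod and head.length describe the block; the zeroed tail
	 * item terminates it, and tail.next later chains in the next
	 * module's block.
	 */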
5063 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
5064 if (!map_array) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005065 pr_warn("Unable to allocate trace eval mapping\n");
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005066 return;
5067 }
5068
Jeremy Linton1793ed92017-05-31 16:56:46 -05005069 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005070
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005071 if (!trace_eval_maps)
5072 trace_eval_maps = map_array;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005073 else {
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005074 ptr = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005075 for (;;) {
Jeremy Linton5f60b352017-05-31 16:56:47 -05005076 ptr = trace_eval_jmp_to_tail(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005077 if (!ptr->tail.next)
5078 break;
5079 ptr = ptr->tail.next;
5080
5081 }
5082 ptr->tail.next = map_array;
5083 }
5084 map_array->head.mod = mod;
5085 map_array->head.length = len;
5086 map_array++;
5087
5088 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5089 map_array->map = **map;
5090 map_array++;
5091 }
5092 memset(map_array, 0, sizeof(*map_array));
5093
Jeremy Linton1793ed92017-05-31 16:56:46 -05005094 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005095}
5096
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005097static void trace_create_eval_file(struct dentry *d_tracer)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005098{
Jeremy Linton681bec02017-05-31 16:56:53 -05005099 trace_create_file("eval_map", 0444, d_tracer,
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005100 NULL, &tracing_eval_map_fops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005101}
5102
Jeremy Linton681bec02017-05-31 16:56:53 -05005103#else /* CONFIG_TRACE_EVAL_MAP_FILE */
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005104static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5105static inline void trace_insert_eval_map_file(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05005106 struct trace_eval_map **start, int len) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05005107#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005108
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005109static void trace_insert_eval_map(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05005110 struct trace_eval_map **start, int len)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005111{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005112 struct trace_eval_map **map;
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005113
5114 if (len <= 0)
5115 return;
5116
5117 map = start;
5118
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005119 trace_event_eval_update(map, len);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005120
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005121 trace_insert_eval_map_file(mod, start, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005122}
5123
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005124static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005125tracing_set_trace_read(struct file *filp, char __user *ubuf,
5126 size_t cnt, loff_t *ppos)
5127{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005128 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08005129 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005130 int r;
5131
5132 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005133 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005134 mutex_unlock(&trace_types_lock);
5135
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005136 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005137}
5138
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005139int tracer_init(struct tracer *t, struct trace_array *tr)
5140{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005141 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005142 return t->init(tr);
5143}
5144
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005145static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005146{
5147 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005148
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005149 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005150 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005151}
5152
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005153#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005155static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5156 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09005157{
5158 int cpu, ret = 0;
5159
5160 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5161 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005162 ret = ring_buffer_resize(trace_buf->buffer,
5163 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005164 if (ret < 0)
5165 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005166 per_cpu_ptr(trace_buf->data, cpu)->entries =
5167 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09005168 }
5169 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005170 ret = ring_buffer_resize(trace_buf->buffer,
5171 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005172 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005173 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5174 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09005175 }
5176
5177 return ret;
5178}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005179#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09005180
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005181static int __tracing_resize_ring_buffer(struct trace_array *tr,
5182 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04005183{
5184 int ret;
5185
	/*
	 * If the kernel or the user changes the size of the ring buffer,
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05005191 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04005192
Steven Rostedtb382ede62012-10-10 21:44:34 -04005193 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005194 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04005195 return 0;
5196
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005197 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005198 if (ret < 0)
5199 return ret;
5200
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005201#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005202 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5203 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005204 goto out;
5205
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005206 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005207 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005208 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5209 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005210 if (r < 0) {
			/*
			 * AARGH! We are left with a max buffer of a
			 * different size!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the size of the main buffer, but
			 * failed to update the size of the max buffer.
			 * Then, when we tried to reset the main buffer
			 * to its original size, we failed there too.
			 * This is very unlikely to happen, but if it
			 * does, warn and kill all tracing.
			 */
Steven Rostedt73c51622009-03-11 13:42:01 -04005225 WARN_ON(1);
5226 tracing_disabled = 1;
5227 }
5228 return ret;
5229 }
5230
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005231 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005232 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005233 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005234 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005235
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005236 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005237#endif /* CONFIG_TRACER_MAX_TRACE */
5238
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005239 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005240 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005241 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005242 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04005243
5244 return ret;
5245}
5246
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005247static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5248 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005249{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07005250 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005251
5252 mutex_lock(&trace_types_lock);
5253
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005254 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5255 /* make sure, this cpu is enabled in the mask */
5256 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5257 ret = -EINVAL;
5258 goto out;
5259 }
5260 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005261
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005262 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005263 if (ret < 0)
5264 ret = -ENOMEM;
5265
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005266out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005267 mutex_unlock(&trace_types_lock);
5268
5269 return ret;
5270}
5271
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005272
/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is never used on a system that has it
 * configured in, the ring buffers start at a minimum size. Once a
 * user starts to use the tracing facility, the buffers need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
5283int tracing_update_buffers(void)
5284{
5285 int ret = 0;
5286
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005287 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005288 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005289 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005290 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005291 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005292
5293 return ret;
5294}
5295
Steven Rostedt577b7852009-02-26 23:43:05 -05005296struct trace_option_dentry;
5297
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04005298static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005299create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05005300
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05005301/*
5302 * Used to clear out the tracer before deletion of an instance.
5303 * Must have trace_types_lock held.
5304 */
5305static void tracing_set_nop(struct trace_array *tr)
5306{
5307 if (tr->current_trace == &nop_trace)
5308 return;
5309
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005310 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05005311
5312 if (tr->current_trace->reset)
5313 tr->current_trace->reset(tr);
5314
5315 tr->current_trace = &nop_trace;
5316}
5317
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04005318static void add_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005319{
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005320 /* Only enable if the directory has been created already. */
5321 if (!tr->dir)
5322 return;
5323
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04005324 create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005325}
5326
5327static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5328{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005329 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005330#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005331 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005332#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005333 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005334
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005335 mutex_lock(&trace_types_lock);
5336
Steven Rostedt73c51622009-03-11 13:42:01 -04005337 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005338 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005339 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04005340 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01005341 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04005342 ret = 0;
5343 }
5344
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005345 for (t = trace_types; t; t = t->next) {
5346 if (strcmp(t->name, buf) == 0)
5347 break;
5348 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005349 if (!t) {
5350 ret = -EINVAL;
5351 goto out;
5352 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005353 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005354 goto out;
5355
	/* Some tracers can't be enabled from the kernel command line */
5357 if (system_state < SYSTEM_RUNNING && t->noboot) {
5358 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5359 t->name);
5360 goto out;
5361 }
5362
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005363 /* Some tracers are only allowed for the top level buffer */
5364 if (!trace_ok_for_array(t, tr)) {
5365 ret = -EINVAL;
5366 goto out;
5367 }
5368
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005369 /* If trace pipe files are being read, we can't change the tracer */
5370 if (tr->current_trace->ref) {
5371 ret = -EBUSY;
5372 goto out;
5373 }
5374
Steven Rostedt9f029e82008-11-12 15:24:24 -05005375 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005376
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005377 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005378
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005379 if (tr->current_trace->reset)
5380 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05005381
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005382 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005383 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05005384
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005385#ifdef CONFIG_TRACER_MAX_TRACE
5386 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05005387
5388 if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * update_max_tr is called with interrupts disabled,
		 * so a synchronize_sched() is sufficient.
		 */
5396 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005397 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005398 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005399#endif
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005400
5401#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005402 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005403 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005404 if (ret < 0)
5405 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005406 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005407#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05005408
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005409 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005410 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005411 if (ret)
5412 goto out;
5413 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005414
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005415 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005416 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05005417 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005418 out:
5419 mutex_unlock(&trace_types_lock);
5420
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005421 return ret;
5422}
5423
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005424static ssize_t
5425tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5426 size_t cnt, loff_t *ppos)
5427{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005428 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08005429 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005430 int i;
5431 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005432 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005433
Steven Rostedt60063a62008-10-28 10:44:24 -04005434 ret = cnt;
5435
Li Zefanee6c2c12009-09-18 14:06:47 +08005436 if (cnt > MAX_TRACER_SIZE)
5437 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005438
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08005439 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005440 return -EFAULT;
5441
5442 buf[cnt] = 0;
5443
	/* strip trailing whitespace */
5445 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5446 buf[i] = 0;
5447
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005448 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005449 if (err)
5450 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005451
Jiri Olsacf8517c2009-10-23 19:36:16 -04005452 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005453
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005454 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005455}
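/*
 * Usage sketch for the "current_tracer" file backed by the two
 * functions above (paths assume tracefs at /sys/kernel/tracing):
 *
 *   cat current_tracer              e.g. "nop"
 *   echo function > current_tracer
 *
 * The write path strips trailing whitespace, so "echo" (which appends
 * a newline) works as expected. An unknown tracer name yields -EINVAL,
 * and the write fails with -EBUSY while trace_pipe readers are open.
 */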
5456
5457static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005458tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5459 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005460{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005461 char buf[64];
5462 int r;
5463
Steven Rostedtcffae432008-05-12 21:21:00 +02005464 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005465 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02005466 if (r > sizeof(buf))
5467 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005468 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005469}
5470
5471static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005472tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5473 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005474{
Hannes Eder5e398412009-02-10 19:44:34 +01005475 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005476 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005477
Peter Huewe22fe9b52011-06-07 21:58:27 +02005478 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5479 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005480 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005481
5482 *ptr = val * 1000;
5483
5484 return cnt;
5485}
5486
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005487static ssize_t
5488tracing_thresh_read(struct file *filp, char __user *ubuf,
5489 size_t cnt, loff_t *ppos)
5490{
5491 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5492}
5493
5494static ssize_t
5495tracing_thresh_write(struct file *filp, const char __user *ubuf,
5496 size_t cnt, loff_t *ppos)
5497{
5498 struct trace_array *tr = filp->private_data;
5499 int ret;
5500
5501 mutex_lock(&trace_types_lock);
5502 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5503 if (ret < 0)
5504 goto out;
5505
5506 if (tr->current_trace->update_thresh) {
5507 ret = tr->current_trace->update_thresh(tr);
5508 if (ret < 0)
5509 goto out;
5510 }
5511
5512 ret = cnt;
5513out:
5514 mutex_unlock(&trace_types_lock);
5515
5516 return ret;
5517}
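/*
 * Usage sketch: "tracing_thresh" is read and written in microseconds
 * (tracing_nsecs_write() multiplies by 1000). For example, to make the
 * latency tracers record only traces longer than 100 usecs (assuming
 * tracefs at /sys/kernel/tracing):
 *
 *   echo 100 > tracing_thresh
 *
 * A value of 0 disables the threshold.
 */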
5518
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04005519#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Chen Gange428abb2015-11-10 05:15:15 +08005520
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005521static ssize_t
5522tracing_max_lat_read(struct file *filp, char __user *ubuf,
5523 size_t cnt, loff_t *ppos)
5524{
5525 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5526}
5527
5528static ssize_t
5529tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5530 size_t cnt, loff_t *ppos)
5531{
5532 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5533}
5534
Chen Gange428abb2015-11-10 05:15:15 +08005535#endif
5536
Steven Rostedtb3806b42008-05-12 21:20:46 +02005537static int tracing_open_pipe(struct inode *inode, struct file *filp)
5538{
Oleg Nesterov15544202013-07-23 17:25:57 +02005539 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005540 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005541 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005542
5543 if (tracing_disabled)
5544 return -ENODEV;
5545
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005546 if (trace_array_get(tr) < 0)
5547 return -ENODEV;
5548
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005549 mutex_lock(&trace_types_lock);
5550
Steven Rostedtb3806b42008-05-12 21:20:46 +02005551 /* create a buffer to store the information to pass to userspace */
5552 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005553 if (!iter) {
5554 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005555 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005556 goto out;
5557 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005558
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04005559 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005560 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005561
5562 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5563 ret = -ENOMEM;
5564 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10305565 }
5566
Steven Rostedta3097202008-11-07 22:36:02 -05005567 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10305568 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05005569
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005570 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04005571 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5572
David Sharp8be07092012-11-13 12:18:22 -08005573 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005574 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08005575 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5576
Oleg Nesterov15544202013-07-23 17:25:57 +02005577 iter->tr = tr;
5578 iter->trace_buffer = &tr->trace_buffer;
5579 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005580 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005581 filp->private_data = iter;
5582
Steven Rostedt107bad82008-05-12 21:21:01 +02005583 if (iter->trace->pipe_open)
5584 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02005585
Arnd Bergmannb4447862010-07-07 23:40:11 +02005586 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005587
5588 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005589out:
5590 mutex_unlock(&trace_types_lock);
5591 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005592
5593fail:
5594 kfree(iter->trace);
5595 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005596 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005597 mutex_unlock(&trace_types_lock);
5598 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005599}
5600
5601static int tracing_release_pipe(struct inode *inode, struct file *file)
5602{
5603 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02005604 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005605
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005606 mutex_lock(&trace_types_lock);
5607
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005608 tr->current_trace->ref--;
5609
Steven Rostedt29bf4a52009-12-09 12:37:43 -05005610 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05005611 iter->trace->pipe_close(iter);
5612
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005613 mutex_unlock(&trace_types_lock);
5614
Rusty Russell44623442009-01-01 10:12:23 +10305615 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005616 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005617 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005618
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005619 trace_array_put(tr);
5620
Steven Rostedtb3806b42008-05-12 21:20:46 +02005621 return 0;
5622}
5623
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005624static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005625trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005626{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005627 struct trace_array *tr = iter->tr;
5628
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005629 /* Iterators are static, they should be filled or empty */
5630 if (trace_buffer_iter(iter, iter->cpu_file))
5631 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005632
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005633 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005634 /*
5635 * Always select as readable when in blocking mode
5636 */
5637 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005638 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005639 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005640 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005641}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005642
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005643static unsigned int
5644tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5645{
5646 struct trace_iterator *iter = filp->private_data;
5647
5648 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005649}
5650
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005651/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005652static int tracing_wait_pipe(struct file *filp)
5653{
5654 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005655 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005656
5657 while (trace_empty(iter)) {
5658
5659 if ((filp->f_flags & O_NONBLOCK)) {
5660 return -EAGAIN;
5661 }
5662
		/*
		 * We block until we have read something and tracing is
		 * disabled. We still block if tracing is disabled but we
		 * have never read anything. This allows a user to cat this
		 * file, and then enable tracing. But after we have read
		 * something, we give an EOF when tracing is disabled again.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
Tahsin Erdogan75df6e62017-09-17 03:23:48 -07005672 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005673 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04005674
5675 mutex_unlock(&iter->mutex);
5676
Rabin Vincente30f53a2014-11-10 19:46:34 +01005677 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04005678
5679 mutex_lock(&iter->mutex);
5680
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005681 if (ret)
5682 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005683 }
5684
5685 return 1;
5686}
5687
Steven Rostedtb3806b42008-05-12 21:20:46 +02005688/*
5689 * Consumer reader.
5690 */
5691static ssize_t
5692tracing_read_pipe(struct file *filp, char __user *ubuf,
5693 size_t cnt, loff_t *ppos)
5694{
5695 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005696 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005697
	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of trace coherency; the ring buffer itself
	 * is protected.
	 */
5703 mutex_lock(&iter->mutex);
Steven Rostedt (Red Hat)12458002016-09-23 22:57:13 -04005704
5705 /* return any leftover data */
5706 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5707 if (sret != -EBUSY)
5708 goto out;
5709
5710 trace_seq_init(&iter->seq);
5711
Steven Rostedt107bad82008-05-12 21:21:01 +02005712 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005713 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5714 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02005715 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02005716 }
5717
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005718waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005719 sret = tracing_wait_pipe(filp);
5720 if (sret <= 0)
5721 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005722
5723 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005724 if (trace_empty(iter)) {
5725 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02005726 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005727 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005728
5729 if (cnt >= PAGE_SIZE)
5730 cnt = PAGE_SIZE - 1;
5731
Steven Rostedt53d0aa72008-05-12 21:21:01 +02005732 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02005733 memset(&iter->seq, 0,
5734 sizeof(struct trace_iterator) -
5735 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04005736 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02005737 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005738
Lai Jiangshan4f535962009-05-18 19:35:34 +08005739 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005740 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05005741 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02005742 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005743 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02005744
Ingo Molnarf9896bf2008-05-12 21:20:47 +02005745 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02005746 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02005747 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005748 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005749 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02005750 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01005751 if (ret != TRACE_TYPE_NO_CONSUME)
5752 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005753
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005754 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02005755 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01005756
		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size, and we should have left via the partial-output
		 * condition above. If we get here, one of the trace_seq_*
		 * functions is not being used properly.
		 */
5762 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5763 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005764 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005765 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005766 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02005767
Steven Rostedtb3806b42008-05-12 21:20:46 +02005768 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005769 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005770 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05005771 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005772
5773 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005774 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005775 * entries, go back to wait for more entries.
5776 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005777 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005778 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005779
Steven Rostedt107bad82008-05-12 21:21:01 +02005780out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005781 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02005782
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005783 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005784}
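/*
 * Note on semantics (a summary of the loop above, not new behavior):
 * trace_pipe is a consuming reader. Something like
 *
 *   cat /sys/kernel/tracing/trace_pipe
 *
 * blocks until events arrive, removes them from the ring buffer as it
 * prints them, and only hits EOF once tracing is disabled after at
 * least one entry has been read.
 */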
5785
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005786static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5787 unsigned int idx)
5788{
5789 __free_page(spd->pages[idx]);
5790}
5791
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005792static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05005793 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005794 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05005795 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005796 .steal = generic_pipe_buf_steal,
5797 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005798};
5799
Steven Rostedt34cd4992009-02-09 12:06:29 -05005800static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01005801tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005802{
5803 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005804 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005805 int ret;
5806
5807 /* Seq buffer is page-sized, exactly what we need. */
5808 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005809 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005810 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005811
5812 if (trace_seq_has_overflowed(&iter->seq)) {
5813 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005814 break;
5815 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005816
		/*
		 * This should not be hit, because TRACE_TYPE_PARTIAL_LINE
		 * should only be returned if iter->seq overflowed. But
		 * check it anyway to be safe.
		 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05005822 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005823 iter->seq.seq.len = save_len;
5824 break;
5825 }
5826
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005827 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005828 if (rem < count) {
5829 rem = 0;
5830 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005831 break;
5832 }
5833
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08005834 if (ret != TRACE_TYPE_NO_CONSUME)
5835 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05005836 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05005837 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05005838 rem = 0;
5839 iter->ent = NULL;
5840 break;
5841 }
5842 }
5843
5844 return rem;
5845}
5846
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005847static ssize_t tracing_splice_read_pipe(struct file *filp,
5848 loff_t *ppos,
5849 struct pipe_inode_info *pipe,
5850 size_t len,
5851 unsigned int flags)
5852{
Jens Axboe35f3d142010-05-20 10:43:18 +02005853 struct page *pages_def[PIPE_DEF_BUFFERS];
5854 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005855 struct trace_iterator *iter = filp->private_data;
5856 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005857 .pages = pages_def,
5858 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005859 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02005860 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005861 .ops = &tracing_pipe_buf_ops,
5862 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005863 };
5864 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005865 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005866 unsigned int i;
5867
Jens Axboe35f3d142010-05-20 10:43:18 +02005868 if (splice_grow_spd(pipe, &spd))
5869 return -ENOMEM;
5870
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005871 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005872
5873 if (iter->trace->splice_read) {
5874 ret = iter->trace->splice_read(iter, filp,
5875 ppos, pipe, len, flags);
5876 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005877 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005878 }
5879
5880 ret = tracing_wait_pipe(filp);
5881 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005882 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005883
Jason Wessel955b61e2010-08-05 09:22:23 -05005884 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005885 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005886 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005887 }
5888
Lai Jiangshan4f535962009-05-18 19:35:34 +08005889 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005890 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005891
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005892 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04005893 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005894 spd.pages[i] = alloc_page(GFP_KERNEL);
5895 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05005896 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005897
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01005898 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005899
5900 /* Copy the data into the page, so we can start over. */
5901 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02005902 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005903 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005904 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005905 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005906 break;
5907 }
Jens Axboe35f3d142010-05-20 10:43:18 +02005908 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005909 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005910
Steven Rostedtf9520752009-03-02 14:04:40 -05005911 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005912 }
5913
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005914 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005915 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005916 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005917
5918 spd.nr_pages = i;
5919
Steven Rostedt (Red Hat)a29054d92016-03-18 15:46:48 -04005920 if (i)
5921 ret = splice_to_pipe(pipe, &spd);
5922 else
5923 ret = 0;
Jens Axboe35f3d142010-05-20 10:43:18 +02005924out:
Eric Dumazet047fe362012-06-12 15:24:40 +02005925 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02005926 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005927
Steven Rostedt34cd4992009-02-09 12:06:29 -05005928out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005929 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02005930 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005931}
5932
Steven Rostedta98a3c32008-05-12 21:20:59 +02005933static ssize_t
5934tracing_entries_read(struct file *filp, char __user *ubuf,
5935 size_t cnt, loff_t *ppos)
5936{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005937 struct inode *inode = file_inode(filp);
5938 struct trace_array *tr = inode->i_private;
5939 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005940 char buf[64];
5941 int r = 0;
5942 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005943
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005944 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005945
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005946 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005947 int cpu, buf_size_same;
5948 unsigned long size;
5949
5950 size = 0;
5951 buf_size_same = 1;
5952 /* check if all cpu sizes are same */
5953 for_each_tracing_cpu(cpu) {
5954 /* fill in the size from first enabled cpu */
5955 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005956 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5957 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005958 buf_size_same = 0;
5959 break;
5960 }
5961 }
5962
5963 if (buf_size_same) {
5964 if (!ring_buffer_expanded)
5965 r = sprintf(buf, "%lu (expanded: %lu)\n",
5966 size >> 10,
5967 trace_buf_size >> 10);
5968 else
5969 r = sprintf(buf, "%lu\n", size >> 10);
5970 } else
5971 r = sprintf(buf, "X\n");
5972 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005973 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005974
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005975 mutex_unlock(&trace_types_lock);
5976
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005977 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5978 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005979}
5980
5981static ssize_t
5982tracing_entries_write(struct file *filp, const char __user *ubuf,
5983 size_t cnt, loff_t *ppos)
5984{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005985 struct inode *inode = file_inode(filp);
5986 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005987 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005988 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005989
Peter Huewe22fe9b52011-06-07 21:58:27 +02005990 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5991 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005992 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005993
5994 /* must have at least 1 entry */
5995 if (!val)
5996 return -EINVAL;
5997
Steven Rostedt1696b2b2008-11-13 00:09:35 -05005998 /* value is in KB */
5999 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006000 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006001 if (ret < 0)
6002 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006003
Jiri Olsacf8517c2009-10-23 19:36:16 -04006004 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006005
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006006 return cnt;
6007}
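/*
 * Usage sketch for the "buffer_size_kb" file (values are in KB, as the
 * shift above shows; paths assume tracefs at /sys/kernel/tracing):
 *
 *   echo 8192 > buffer_size_kb                resize every CPU's buffer
 *   echo 1024 > per_cpu/cpu0/buffer_size_kb   resize only CPU 0
 *
 * Reading shows "X" when per-cpu sizes differ, and "(expanded: N)"
 * while the boot-time minimal buffer has not been grown yet.
 */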
Steven Rostedtbf5e6512008-11-10 21:46:00 -05006008
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006009static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006010tracing_total_entries_read(struct file *filp, char __user *ubuf,
6011 size_t cnt, loff_t *ppos)
6012{
6013 struct trace_array *tr = filp->private_data;
6014 char buf[64];
6015 int r, cpu;
6016 unsigned long size = 0, expanded_size = 0;
6017
6018 mutex_lock(&trace_types_lock);
6019 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006020 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006021 if (!ring_buffer_expanded)
6022 expanded_size += trace_buf_size >> 10;
6023 }
6024 if (ring_buffer_expanded)
6025 r = sprintf(buf, "%lu\n", size);
6026 else
6027 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6028 mutex_unlock(&trace_types_lock);
6029
6030 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6031}
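/*
 * This backs the read-only "buffer_total_size_kb" file: the sum of all
 * per-cpu buffer sizes in KB, e.g. (paths as above):
 *
 *   cat buffer_total_size_kb
 *   8192
 *
 * The sample number is illustrative only.
 */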
6032
6033static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006034tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6035 size_t cnt, loff_t *ppos)
6036{
	/*
	 * There is no need to read what the user has written; this function
	 * exists just to make sure that there is no error when "echo" is used.
	 */
6041
6042 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006043
6044 return cnt;
6045}
6046
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006047static int
6048tracing_free_buffer_release(struct inode *inode, struct file *filp)
6049{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006050 struct trace_array *tr = inode->i_private;
6051
	/* disable tracing if the "disable_on_free" trace option is set */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006053 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07006054 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006055 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006056 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006057
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006058 trace_array_put(tr);
6059
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006060 return 0;
6061}
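/*
 * Usage sketch for the "free_buffer" file these two handlers implement:
 * the write itself is a no-op, and the buffers are shrunk to zero when
 * the file is released, e.g.:
 *
 *   echo > free_buffer
 *
 * If the "disable_on_free" trace option (TRACE_ITER_STOP_ON_FREE) is
 * set, tracing is also turned off first.
 */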
6062
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006063static ssize_t
6064tracing_mark_write(struct file *filp, const char __user *ubuf,
6065 size_t cnt, loff_t *fpos)
6066{
Alexander Z Lam2d716192013-07-01 15:31:24 -07006067 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04006068 struct ring_buffer_event *event;
6069 struct ring_buffer *buffer;
6070 struct print_entry *entry;
6071 unsigned long irq_flags;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006072 const char faulted[] = "<faulted>";
Steven Rostedtd696b582011-09-22 11:50:27 -04006073 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006074 int size;
6075 int len;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006076
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006077/* Used in tracing_mark_raw_write() as well */
6078#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006079
Steven Rostedtc76f0692008-11-07 22:36:02 -05006080 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006081 return -EINVAL;
6082
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006083 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07006084 return -EINVAL;
6085
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006086 if (cnt > TRACE_BUF_SIZE)
6087 cnt = TRACE_BUF_SIZE;
6088
Steven Rostedtd696b582011-09-22 11:50:27 -04006089 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006090
Steven Rostedtd696b582011-09-22 11:50:27 -04006091 local_save_flags(irq_flags);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006092 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6093
	/* If the write is shorter than "<faulted>", make sure we can still add that */
6095 if (cnt < FAULTED_SIZE)
6096 size += FAULTED_SIZE - cnt;
6097
Alexander Z Lam2d716192013-07-01 15:31:24 -07006098 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006099 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6100 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006101 if (unlikely(!event))
Steven Rostedtd696b582011-09-22 11:50:27 -04006102 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006103 return -EBADF;
Steven Rostedtd696b582011-09-22 11:50:27 -04006104
6105 entry = ring_buffer_event_data(event);
6106 entry->ip = _THIS_IP_;
6107
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006108 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6109 if (len) {
6110 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6111 cnt = FAULTED_SIZE;
6112 written = -EFAULT;
Steven Rostedtd696b582011-09-22 11:50:27 -04006113 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006114 written = cnt;
6115 len = cnt;
Steven Rostedtd696b582011-09-22 11:50:27 -04006116
6117 if (entry->buf[cnt - 1] != '\n') {
6118 entry->buf[cnt] = '\n';
6119 entry->buf[cnt + 1] = '\0';
6120 } else
6121 entry->buf[cnt] = '\0';
6122
Steven Rostedt7ffbd482012-10-11 12:14:25 -04006123 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04006124
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006125 if (written > 0)
6126 *fpos += written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006127
Steven Rostedtfa32e852016-07-06 15:25:08 -04006128 return written;
6129}
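/*
 * Usage sketch for the "trace_marker" file (assuming tracefs at
 * /sys/kernel/tracing):
 *
 *   echo "hello from userspace" > trace_marker
 *
 * The string shows up in the trace as a print event. If the user page
 * faults while we copy in atomic context, the event is logged as
 * "<faulted>" and the write returns -EFAULT, as the code above shows.
 */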
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006130
/* Limit it for now to 3K (including tag) */
#define RAW_DATA_MAX_SIZE (1024*3)

static ssize_t
tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct raw_data_entry *entry;
	const char faulted[] = "<faulted>";
	unsigned long irq_flags;
	ssize_t written;
	int size;
	int len;

#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	/* The marker must at least have a tag id */
	if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt;
	if (cnt < FAULT_SIZE_ID)
		size += FAULT_SIZE_ID - cnt;

	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
					    irq_flags, preempt_count());
	if (!event)
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);

	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
	if (len) {
		entry->id = -1;
		memcpy(&entry->buf, faulted, FAULTED_SIZE);
		written = -EFAULT;
	} else
		written = cnt;

	__buffer_unlock_commit(buffer, event);

	if (written > 0)
		*fpos += written;

	return written;
}

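/*
 * Usage note for tracing_mark_raw_write() above: it backs the
 * "trace_marker_raw" file. A write must begin with an int-sized tag
 * id, followed by an opaque binary payload. A minimal user-space
 * sketch (hypothetical example, error handling omitted):
 *
 *	struct { int id; char data[8]; } rec = { 42, "payload" };
 *	int fd = open("trace_marker_raw", O_WRONLY);
 *	write(fd, &rec, sizeof(rec));	// recorded as TRACE_RAW_DATA
 */
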
static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

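/*
 * Reading the "trace_clock" file lists every available clock and
 * brackets the active one, producing a single line along the lines
 * of:
 *
 *	[local] global counter uptime perf mono mono_raw boot
 *
 * (the exact set depends on the trace_clocks[] table).
 */
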
static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}

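/*
 * Note that switching clocks resets the live buffer and, when
 * configured, the max/snapshot buffer as well: timestamps taken with
 * two different clocks cannot be compared. A typical switch is:
 *
 *	echo global > trace_clock
 */
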
static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

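/*
 * Per-open state for the binary buffer files (trace_pipe_raw and the
 * raw snapshot file). "spare" caches one ring-buffer page between
 * reads, and "spare_cpu" records which per-cpu buffer the page was
 * taken from, since a spare page may only be handed back to the
 * buffer it was allocated from.
 */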
struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		spare_cpu;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}

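/*
 * To summarize the switch above: the "snapshot" file accepts a small
 * command set. Writing "0" frees the snapshot buffer, "1" allocates
 * it if needed and swaps it with the live buffer, and any other
 * number clears the snapshot contents, e.g.:
 *
 *	echo 1 > snapshot	# take a snapshot
 *	cat snapshot		# read it back out
 */
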
static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */


static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};
#endif

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_mark_raw_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_raw_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

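/*
 * The handlers below implement the per-cpu "trace_pipe_raw" files.
 * Unlike "trace_pipe", which renders events as text, these hand whole
 * ring-buffer pages to user space in their binary form, either by
 * copying them (read) or zero-copy (splice).
 */
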
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare = NULL;
	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	tr->current_trace->ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret = 0;
	ssize_t size;

	if (!count)
		return 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (!info->spare) {
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
		if (IS_ERR(info->spare)) {
			ret = PTR_ERR(info->spare);
			info->spare = NULL;
		} else {
			info->spare_cpu = iter->cpu_file;
		}
	}
	if (!info->spare)
		return ret;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			ret = wait_on_pipe(iter, false);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;

	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->current_trace->ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer,
					   info->spare_cpu, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

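/*
 * A buffer_ref pins one ring-buffer page while it sits in a pipe for
 * splice(). The reference count lets multiple pipe buffers share the
 * page; it is handed back to its per-cpu buffer only when the last
 * reference is dropped.
 */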
struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			cpu;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (IS_ERR(ref->page)) {
			ret = PTR_ERR(ref->page);
			ref->page = NULL;
			kfree(ref);
			break;
		}
		ref->cpu = iter->cpu_file;

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->cpu,
						   ref->page);
			kfree(ref);
			break;
		}

		/*
		 * zero out any left over data, this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (ret)
			goto out;

		ret = -EAGAIN;
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			goto out;

		ret = wait_on_pipe(iter, true);
		if (ret)
			goto out;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

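/*
 * Consumer sketch for trace_pipe_raw (hypothetical user-space code,
 * shown only to illustrate the page-at-a-time contract; assumes 4K
 * pages):
 *
 *	int fd = open("per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
 *	char page[4096];
 *	ssize_t r = read(fd, page, sizeof(page));
 *
 * Each successful read returns one ring-buffer page; tools such as
 * trace-cmd splice these pages straight into a file instead.
 */
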
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
								t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

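/*
 * Reading per_cpu/cpuN/stats yields one "name: value" line per
 * counter collected above, roughly (values invented for
 * illustration):
 *
 *	entries: 1024
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 53280
 *	oldest event ts:  5261.291132
 *	now ts:  5362.518177
 *	dropped events: 0
 *	read events: 128
 *
 * The two "ts" lines fall back to raw counter values when the trace
 * clock does not count in nanoseconds.
 */
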
#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64]; /* Not too big for a shallow stack */
	int r;

	r = scnprintf(buf, 63, "%ld", *p);
	buf[r++] = '\n';

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	tracing_snapshot_instance(tr);
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {

		if (*count <= 0)
			return;

		(*count)--;
	}

	tracing_snapshot_instance(tr);
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		if (!mapper)
			return;
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
	.init			= ftrace_snapshot_init,
	.free			= ftrace_snapshot_free,
};

static int
ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	if (!tr)
		return -ENODEV;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = alloc_snapshot(tr);
	if (ret < 0)
		goto out;

	ret = register_ftrace_function_probe(glob, tr, ops, count);

 out:
	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

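/*
 * The probe machinery above implements the "snapshot" command of
 * set_ftrace_filter (the function name here is only an example):
 *
 *	echo 'schedule:snapshot' > set_ftrace_filter	# on every call
 *	echo 'schedule:snapshot:5' > set_ftrace_filter	# first 5 calls
 *	echo '!schedule:snapshot' > set_ftrace_filter	# remove the probe
 *
 * The optional ":count" is parsed by ftrace_trace_snapshot_callback()
 * and stored per ip via the ftrace_func_mapper.
 */
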
static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
	if (WARN_ON(!tr->dir))
		return ERR_PTR(-ENODEV);

	/* Top directory uses NULL as the parent */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return NULL;

	/* All sub buffers have a descriptor */
	return tr->dir;
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		d_inode(ret)->i_cdev = (void *)(cpu + 1);
	return ret;
}

static void
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}


static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

/*
 * In order to pass in both the trace_array descriptor as well as the index
 * to the flag that the trace option file represents, the trace_array
 * has a character array of trace_flags_index[], which holds the index
 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 * The address of this character array is passed to the flag option file
 * read/write callbacks.
 *
 * In order to extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm.
 *
 *   idx = *ptr;
 *
 * As the pointer itself contains the address of the index (remember
 * index[1] == 1).
 *
 * Then to get the trace_array descriptor, by subtracting that index
 * from the ptr, we get to the start of the index itself.
 *
 *   ptr - idx == &index[0]
 *
 * Then a simple container_of() from that pointer gets us to the
 * trace_array descriptor.
 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;

	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}

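/*
 * A short worked example of the scheme described above: if data
 * points at tr->trace_flags_index[3], then *pindex reads back 3,
 * data - 3 is &tr->trace_flags_index[0], and container_of() on that
 * address recovers the enclosing trace_array.
 */
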
static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	char *buf;

	get_tr_index(tr_index, &tr, &index);

	if (tr->trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

Frederic Weisbecker5452af62009-03-27 00:25:38 +01007366struct dentry *trace_create_file(const char *name,
Al Virof4ae40a62011-07-24 04:33:43 -04007367 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007368 struct dentry *parent,
7369 void *data,
7370 const struct file_operations *fops)
7371{
7372 struct dentry *ret;
7373
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007374 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007375 if (!ret)
Joe Perchesa395d6a2016-03-22 14:28:09 -07007376 pr_warn("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007377
7378 return ret;
7379}
7380
7381
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007382static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05007383{
7384 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05007385
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007386 if (tr->options)
7387 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05007388
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007389 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05007390 if (IS_ERR(d_tracer))
Steven Rostedta8259072009-02-26 22:19:12 -05007391 return NULL;
7392
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007393 tr->options = tracefs_create_dir("options", d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007394 if (!tr->options) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07007395 pr_warn("Could not create tracefs directory 'options'\n");
Steven Rostedta8259072009-02-26 22:19:12 -05007396 return NULL;
7397 }
7398
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007399 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05007400}
7401
Steven Rostedt577b7852009-02-26 23:43:05 -05007402static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007403create_trace_option_file(struct trace_array *tr,
7404 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05007405 struct tracer_flags *flags,
7406 struct tracer_opt *opt)
7407{
7408 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05007409
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007410 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05007411 if (!t_options)
7412 return;
7413
7414 topt->flags = flags;
7415 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007416 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05007417
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007418 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05007419 &trace_options_fops);
7420
Steven Rostedt577b7852009-02-26 23:43:05 -05007421}
7422
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007423static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007424create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05007425{
7426 struct trace_option_dentry *topts;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007427 struct trace_options *tr_topts;
Steven Rostedt577b7852009-02-26 23:43:05 -05007428 struct tracer_flags *flags;
7429 struct tracer_opt *opts;
7430 int cnt;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007431 int i;
Steven Rostedt577b7852009-02-26 23:43:05 -05007432
7433 if (!tracer)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007434 return;
Steven Rostedt577b7852009-02-26 23:43:05 -05007435
7436 flags = tracer->flags;
7437
7438 if (!flags || !flags->opts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007439 return;
7440
7441 /*
7442 * If this is an instance, only create flags for tracers
7443 * the instance may have.
7444 */
7445 if (!trace_ok_for_array(tracer, tr))
7446 return;
7447
7448 for (i = 0; i < tr->nr_topts; i++) {
Chunyu Hud39cdd22016-03-08 21:37:01 +08007449 /* Make sure there are no duplicate flags. */
7450 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007451 return;
7452 }
Steven Rostedt577b7852009-02-26 23:43:05 -05007453
7454 opts = flags->opts;
7455
7456 for (cnt = 0; opts[cnt].name; cnt++)
7457 ;
7458
Steven Rostedt0cfe8242009-02-27 10:51:10 -05007459 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05007460 if (!topts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007461 return;
7462
7463 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7464 GFP_KERNEL);
7465 if (!tr_topts) {
7466 kfree(topts);
7467 return;
7468 }
7469
7470 tr->topts = tr_topts;
7471 tr->topts[tr->nr_topts].tracer = tracer;
7472 tr->topts[tr->nr_topts].topts = topts;
7473 tr->nr_topts++;
Steven Rostedt577b7852009-02-26 23:43:05 -05007474
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04007475 for (cnt = 0; opts[cnt].name; cnt++) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007476 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05007477 &opts[cnt]);
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04007478 WARN_ONCE(topts[cnt].entry == NULL,
7479 "Failed to create trace option: %s",
7480 opts[cnt].name);
7481 }
Steven Rostedt577b7852009-02-26 23:43:05 -05007482}
7483
Steven Rostedta8259072009-02-26 22:19:12 -05007484static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007485create_trace_option_core_file(struct trace_array *tr,
7486 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05007487{
7488 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05007489
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007490 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05007491 if (!t_options)
7492 return NULL;
7493
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007494 return trace_create_file(option, 0644, t_options,
7495 (void *)&tr->trace_flags_index[index],
7496 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05007497}
7498
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007499static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05007500{
7501 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007502 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05007503 int i;
7504
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007505 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05007506 if (!t_options)
7507 return;
7508
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007509 for (i = 0; trace_options[i]; i++) {
7510 if (top_level ||
7511 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7512 create_trace_option_core_file(tr, trace_options[i], i);
7513 }
Steven Rostedta8259072009-02-26 22:19:12 -05007514}
7515
Steven Rostedt499e5472012-02-22 15:50:28 -05007516static ssize_t
7517rb_simple_read(struct file *filp, char __user *ubuf,
7518 size_t cnt, loff_t *ppos)
7519{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04007520 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05007521 char buf[64];
7522 int r;
7523
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04007524 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05007525 r = sprintf(buf, "%d\n", r);
7526
7527 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7528}
7529
7530static ssize_t
7531rb_simple_write(struct file *filp, const char __user *ubuf,
7532 size_t cnt, loff_t *ppos)
7533{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04007534 struct trace_array *tr = filp->private_data;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007535 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05007536 unsigned long val;
7537 int ret;
7538
7539 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7540 if (ret)
7541 return ret;
7542
7543 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05007544 mutex_lock(&trace_types_lock);
7545 if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04007546 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007547 if (tr->current_trace->start)
7548 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05007549 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04007550 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007551 if (tr->current_trace->stop)
7552 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05007553 }
7554 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05007555 }
7556
7557 (*ppos)++;
7558
7559 return cnt;
7560}
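
/*
 * Usage sketch (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *	echo 0 > /sys/kernel/tracing/tracing_on    # rb_simple_write() -> off
 *	cat /sys/kernel/tracing/tracing_on         # rb_simple_read() -> "0"
 *	echo 1 > /sys/kernel/tracing/tracing_on    # rb_simple_write() -> on
 */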
7561
7562static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007563 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05007564 .read = rb_simple_read,
7565 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007566 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05007567 .llseek = default_llseek,
7568};
7569
Steven Rostedt277ba042012-08-03 16:10:49 -04007570struct dentry *trace_instance_dir;
7571
7572static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007573init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
Steven Rostedt277ba042012-08-03 16:10:49 -04007574
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007575static int
7576allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04007577{
7578 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007579
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007580 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007581
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05007582 buf->tr = tr;
7583
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007584 buf->buffer = ring_buffer_alloc(size, rb_flags);
7585 if (!buf->buffer)
7586 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007587
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007588 buf->data = alloc_percpu(struct trace_array_cpu);
7589 if (!buf->data) {
7590 ring_buffer_free(buf->buffer);
7591 return -ENOMEM;
7592 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007593
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007594 /* Allocate the first page for all buffers */
7595 set_buffer_entries(&tr->trace_buffer,
7596 ring_buffer_size(tr->trace_buffer.buffer, 0));
7597
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007598 return 0;
7599}
7600
7601static int allocate_trace_buffers(struct trace_array *tr, int size)
7602{
7603 int ret;
7604
7605 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7606 if (ret)
7607 return ret;
7608
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007609#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007610 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7611 allocate_snapshot ? size : 1);
7612 if (WARN_ON(ret)) {
7613 ring_buffer_free(tr->trace_buffer.buffer);
7614 free_percpu(tr->trace_buffer.data);
7615 return -ENOMEM;
7616 }
7617 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007618
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007619 /*
7620 * Only the top level trace array gets its snapshot allocated
7621 * from the kernel command line.
7622 */
7623 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007624#endif
7625 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007626}
7627
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04007628static void free_trace_buffer(struct trace_buffer *buf)
7629{
7630 if (buf->buffer) {
7631 ring_buffer_free(buf->buffer);
7632 buf->buffer = NULL;
7633 free_percpu(buf->data);
7634 buf->data = NULL;
7635 }
7636}
7637
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007638static void free_trace_buffers(struct trace_array *tr)
7639{
7640 if (!tr)
7641 return;
7642
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04007643 free_trace_buffer(&tr->trace_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007644
7645#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04007646 free_trace_buffer(&tr->max_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007647#endif
7648}
7649
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007650static void init_trace_flags_index(struct trace_array *tr)
7651{
7652 int i;
7653
7654 /* Used by the trace options files */
7655 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7656 tr->trace_flags_index[i] = i;
7657}
7658
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007659static void __update_tracer_options(struct trace_array *tr)
7660{
7661 struct tracer *t;
7662
7663 for (t = trace_types; t; t = t->next)
7664 add_tracer_options(tr, t);
7665}
7666
7667static void update_tracer_options(struct trace_array *tr)
7668{
7669 mutex_lock(&trace_types_lock);
7670 __update_tracer_options(tr);
7671 mutex_unlock(&trace_types_lock);
7672}
7673
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05007674static int instance_mkdir(const char *name)
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007675{
Steven Rostedt277ba042012-08-03 16:10:49 -04007676 struct trace_array *tr;
7677 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04007678
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04007679 mutex_lock(&event_mutex);
Steven Rostedt277ba042012-08-03 16:10:49 -04007680 mutex_lock(&trace_types_lock);
7681
7682 ret = -EEXIST;
7683 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7684 if (tr->name && strcmp(tr->name, name) == 0)
7685 goto out_unlock;
7686 }
7687
7688 ret = -ENOMEM;
7689 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7690 if (!tr)
7691 goto out_unlock;
7692
7693 tr->name = kstrdup(name, GFP_KERNEL);
7694 if (!tr->name)
7695 goto out_free_tr;
7696
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007697 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7698 goto out_free_tr;
7699
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04007700 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007701
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007702 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7703
Steven Rostedt277ba042012-08-03 16:10:49 -04007704 raw_spin_lock_init(&tr->start_lock);
7705
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05007706 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7707
Steven Rostedt277ba042012-08-03 16:10:49 -04007708 tr->current_trace = &nop_trace;
7709
7710 INIT_LIST_HEAD(&tr->systems);
7711 INIT_LIST_HEAD(&tr->events);
7712
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007713 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04007714 goto out_free_tr;
7715
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007716 tr->dir = tracefs_create_dir(name, trace_instance_dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04007717 if (!tr->dir)
7718 goto out_free_tr;
7719
7720 ret = event_trace_add_tracer(tr->dir, tr);
Alexander Z Lam609e85a2013-07-10 17:34:34 -07007721 if (ret) {
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007722 tracefs_remove_recursive(tr->dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04007723 goto out_free_tr;
Alexander Z Lam609e85a2013-07-10 17:34:34 -07007724 }
Steven Rostedt277ba042012-08-03 16:10:49 -04007725
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04007726 ftrace_init_trace_array(tr);
7727
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007728 init_tracer_tracefs(tr, tr->dir);
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007729 init_trace_flags_index(tr);
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007730 __update_tracer_options(tr);
Steven Rostedt277ba042012-08-03 16:10:49 -04007731
7732 list_add(&tr->list, &ftrace_trace_arrays);
7733
7734 mutex_unlock(&trace_types_lock);
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04007735 mutex_unlock(&event_mutex);
Steven Rostedt277ba042012-08-03 16:10:49 -04007736
7737 return 0;
7738
7739 out_free_tr:
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04007740 free_trace_buffers(tr);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007741 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04007742 kfree(tr->name);
7743 kfree(tr);
7744
7745 out_unlock:
7746 mutex_unlock(&trace_types_lock);
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04007747 mutex_unlock(&event_mutex);
Steven Rostedt277ba042012-08-03 16:10:49 -04007748
7749 return ret;
7750
7751}
7752
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05007753static int instance_rmdir(const char *name)
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007754{
7755 struct trace_array *tr;
7756 int found = 0;
7757 int ret;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007758 int i;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007759
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04007760 mutex_lock(&event_mutex);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007761 mutex_lock(&trace_types_lock);
7762
7763 ret = -ENODEV;
7764 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7765 if (tr->name && strcmp(tr->name, name) == 0) {
7766 found = 1;
7767 break;
7768 }
7769 }
7770 if (!found)
7771 goto out_unlock;
7772
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007773 ret = -EBUSY;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007774 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007775 goto out_unlock;
7776
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007777 list_del(&tr->list);
7778
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04007779 /* Disable all the flags that were enabled coming in */
7780 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7781 if ((1 << i) & ZEROED_TRACE_FLAGS)
7782 set_tracer_flag(tr, 1 << i, 0);
7783 }
7784
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05007785 tracing_set_nop(tr);
Naveen N. Raoa0e63692017-05-16 23:21:26 +05307786 clear_ftrace_function_probes(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007787 event_trace_del_tracer(tr);
Namhyung Kimd879d0b2017-04-17 11:44:27 +09007788 ftrace_clear_pids(tr);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05007789 ftrace_destroy_function_files(tr);
Jiaxing Wang681a4a22015-10-18 19:58:08 +08007790 tracefs_remove_recursive(tr->dir);
Steven Rostedt (Red Hat)a9fcaaa2014-06-06 23:17:28 -04007791 free_trace_buffers(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007792
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007793 for (i = 0; i < tr->nr_topts; i++) {
7794 kfree(tr->topts[i].topts);
7795 }
7796 kfree(tr->topts);
7797
Chunyu Hudb9108e02017-07-20 18:36:09 +08007798 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007799 kfree(tr->name);
7800 kfree(tr);
7801
7802 ret = 0;
7803
7804 out_unlock:
7805 mutex_unlock(&trace_types_lock);
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04007806 mutex_unlock(&event_mutex);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04007807
7808 return ret;
7809}
7810
Steven Rostedt277ba042012-08-03 16:10:49 -04007811static __init void create_trace_instances(struct dentry *d_tracer)
7812{
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05007813 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7814 instance_mkdir,
7815 instance_rmdir);
Steven Rostedt277ba042012-08-03 16:10:49 -04007816 if (WARN_ON(!trace_instance_dir))
7817 return;
Steven Rostedt277ba042012-08-03 16:10:49 -04007818}
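
/*
 * Usage sketch (assuming tracefs is mounted at /sys/kernel/tracing):
 * mkdir and rmdir in the instances directory are routed to the
 * callbacks registered above.
 *
 *	mkdir /sys/kernel/tracing/instances/foo    # -> instance_mkdir("foo")
 *	rmdir /sys/kernel/tracing/instances/foo    # -> instance_rmdir("foo")
 */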
7819
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007820static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007821init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007822{
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05007823 int cpu;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007824
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05007825 trace_create_file("available_tracers", 0444, d_tracer,
7826 tr, &show_traces_fops);
7827
7828 trace_create_file("current_tracer", 0644, d_tracer,
7829 tr, &set_tracer_fops);
7830
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007831 trace_create_file("tracing_cpumask", 0644, d_tracer,
7832 tr, &tracing_cpumask_fops);
7833
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007834 trace_create_file("trace_options", 0644, d_tracer,
7835 tr, &tracing_iter_fops);
7836
7837 trace_create_file("trace", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007838 tr, &tracing_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007839
7840 trace_create_file("trace_pipe", 0444, d_tracer,
Oleg Nesterov15544202013-07-23 17:25:57 +02007841 tr, &tracing_pipe_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007842
7843 trace_create_file("buffer_size_kb", 0644, d_tracer,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007844 tr, &tracing_entries_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007845
7846 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
7847 tr, &tracing_total_entries_fops);
7848
Wang YanQing238ae932013-05-26 16:52:01 +08007849 trace_create_file("free_buffer", 0200, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007850 tr, &tracing_free_buffer_fops);
7851
7852 trace_create_file("trace_marker", 0220, d_tracer,
7853 tr, &tracing_mark_fops);
7854
Steven Rostedtfa32e852016-07-06 15:25:08 -04007855 trace_create_file("trace_marker_raw", 0220, d_tracer,
7856 tr, &tracing_mark_raw_fops);
7857
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007858 trace_create_file("trace_clock", 0644, d_tracer, tr,
7859 &trace_clock_fops);
7860
7861 trace_create_file("tracing_on", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007862 tr, &rb_simple_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05007863
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007864 create_trace_options_dir(tr);
7865
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04007866#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05007867 trace_create_file("tracing_max_latency", 0644, d_tracer,
7868 &tr->max_latency, &tracing_max_lat_fops);
7869#endif
7870
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05007871 if (ftrace_create_function_files(tr, d_tracer))
7872 WARN(1, "Could not allocate function filter files");
7873
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05007874#ifdef CONFIG_TRACER_SNAPSHOT
7875 trace_create_file("snapshot", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007876 tr, &snapshot_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05007877#endif
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05007878
7879 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007880 tracing_init_tracefs_percpu(tr, cpu);
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05007881
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04007882 ftrace_init_tracefs(tr, d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007883}
7884
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13007885static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007886{
7887 struct vfsmount *mnt;
7888 struct file_system_type *type;
7889
7890 /*
7891 * To maintain backward compatibility for tools that mount
7892 * debugfs to get to the tracing facility, tracefs is automatically
7893 * mounted to the debugfs/tracing directory.
7894 */
7895 type = get_fs_type("tracefs");
7896 if (!type)
7897 return NULL;
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13007898 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007899 put_filesystem(type);
7900 if (IS_ERR(mnt))
7901 return NULL;
7902 mntget(mnt);
7903
7904 return mnt;
7905}
7906
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007907/**
7908 * tracing_init_dentry - initialize top level trace array
7909 *
7910 * This is called when creating files or directories in the tracing
7911 * directory. It is called via fs_initcall() by any of the boot up code
7912 * and expects to return the dentry of the top level tracing directory.
7913 */
7914struct dentry *tracing_init_dentry(void)
7915{
7916 struct trace_array *tr = &global_trace;
7917
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007918 /* The top level trace array uses NULL as parent */
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007919 if (tr->dir)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007920 return NULL;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007921
Jiaxing Wang8b129192015-11-06 16:04:16 +08007922 if (WARN_ON(!tracefs_initialized()) ||
7923 (IS_ENABLED(CONFIG_DEBUG_FS) &&
7924 WARN_ON(!debugfs_initialized())))
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007925 return ERR_PTR(-ENODEV);
7926
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007927 /*
7928 * As there may still be users that expect the tracing
7929 * files to exist in debugfs/tracing, we must automount
7930 * the tracefs file system there, so older tools still
7931 * work with the newer kernel.
7932 */
7933 tr->dir = debugfs_create_automount("tracing", NULL,
7934 trace_automount, NULL);
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007935 if (!tr->dir) {
7936 pr_warn_once("Could not create debugfs directory 'tracing'\n");
7937 return ERR_PTR(-ENOMEM);
7938 }
7939
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007940 return NULL;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007941}
7942
Jeremy Linton00f4b652017-05-31 16:56:43 -05007943extern struct trace_eval_map *__start_ftrace_eval_maps[];
7944extern struct trace_eval_map *__stop_ftrace_eval_maps[];
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04007945
Jeremy Linton5f60b352017-05-31 16:56:47 -05007946static void __init trace_eval_init(void)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04007947{
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007948 int len;
7949
Jeremy Linton02fd7f62017-05-31 16:56:42 -05007950 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05007951 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04007952}
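
/*
 * Background sketch: the entries between __start_ftrace_eval_maps and
 * __stop_ftrace_eval_maps are emitted by macros such as
 * TRACE_DEFINE_ENUM() in trace event headers, e.g.:
 *
 *	TRACE_DEFINE_ENUM(XDP_DROP);
 *
 * so that an event's print format can show the enum value's name
 * instead of a raw number. XDP_DROP is only an illustration here.
 */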
7953
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007954#ifdef CONFIG_MODULES
Jeremy Lintonf57a4142017-05-31 16:56:48 -05007955static void trace_module_add_evals(struct module *mod)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007956{
Jeremy Linton99be6472017-05-31 16:56:44 -05007957 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007958 return;
7959
7960 /*
7961 * Modules with bad taint do not have events created, so do
7962 * not bother with enums either.
7963 */
7964 if (trace_module_has_bad_taint(mod))
7965 return;
7966
Jeremy Lintonf57a4142017-05-31 16:56:48 -05007967 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007968}
7969
Jeremy Linton681bec02017-05-31 16:56:53 -05007970#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Lintonf57a4142017-05-31 16:56:48 -05007971static void trace_module_remove_evals(struct module *mod)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007972{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05007973 union trace_eval_map_item *map;
7974 union trace_eval_map_item **last = &trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007975
Jeremy Linton99be6472017-05-31 16:56:44 -05007976 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007977 return;
7978
Jeremy Linton1793ed92017-05-31 16:56:46 -05007979 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007980
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05007981 map = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007982
7983 while (map) {
7984 if (map->head.mod == mod)
7985 break;
Jeremy Linton5f60b352017-05-31 16:56:47 -05007986 map = trace_eval_jmp_to_tail(map);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007987 last = &map->tail.next;
7988 map = map->tail.next;
7989 }
7990 if (!map)
7991 goto out;
7992
Jeremy Linton5f60b352017-05-31 16:56:47 -05007993 *last = trace_eval_jmp_to_tail(map)->tail.next;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007994 kfree(map);
7995 out:
Jeremy Linton1793ed92017-05-31 16:56:46 -05007996 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007997}
7998#else
Jeremy Lintonf57a4142017-05-31 16:56:48 -05007999static inline void trace_module_remove_evals(struct module *mod) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05008000#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008001
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008002static int trace_module_notify(struct notifier_block *self,
8003 unsigned long val, void *data)
8004{
8005 struct module *mod = data;
8006
8007 switch (val) {
8008 case MODULE_STATE_COMING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008009 trace_module_add_evals(mod);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008010 break;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008011 case MODULE_STATE_GOING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008012 trace_module_remove_evals(mod);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008013 break;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008014 }
8015
8016 return 0;
8017}
8018
8019static struct notifier_block trace_module_nb = {
8020 .notifier_call = trace_module_notify,
8021 .priority = 0,
8022};
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008023#endif /* CONFIG_MODULES */
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008024
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008025static __init int tracer_init_tracefs(void)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008026{
8027 struct dentry *d_tracer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008028
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08008029 trace_access_lock_init();
8030
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008031 d_tracer = tracing_init_dentry();
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05008032 if (IS_ERR(d_tracer))
Namhyung Kimed6f1c92013-04-10 09:18:12 +09008033 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008034
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008035 init_tracer_tracefs(&global_trace, d_tracer);
Steven Rostedt (Red Hat)501c2372016-07-05 10:04:34 -04008036 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008037
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008038 trace_create_file("tracing_thresh", 0644, d_tracer,
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04008039 &global_trace, &tracing_thresh_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008040
Li Zefan339ae5d2009-04-17 10:34:30 +08008041 trace_create_file("README", 0444, d_tracer,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008042 NULL, &tracing_readme_fops);
Ingo Molnar7bd2f242008-05-12 21:20:45 +02008043
Avadh Patel69abe6a2009-04-10 16:04:48 -04008044 trace_create_file("saved_cmdlines", 0444, d_tracer,
8045 NULL, &tracing_saved_cmdlines_fops);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03008046
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09008047 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8048 NULL, &tracing_saved_cmdlines_size_fops);
8049
Michael Sartain99c621d2017-07-05 22:07:15 -06008050 trace_create_file("saved_tgids", 0444, d_tracer,
8051 NULL, &tracing_saved_tgids_fops);
8052
Jeremy Linton5f60b352017-05-31 16:56:47 -05008053 trace_eval_init();
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04008054
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008055 trace_create_eval_file(d_tracer);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008056
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008057#ifdef CONFIG_MODULES
8058 register_module_notifier(&trace_module_nb);
8059#endif
8060
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008061#ifdef CONFIG_DYNAMIC_FTRACE
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008062 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8063 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008064#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008065
Steven Rostedt277ba042012-08-03 16:10:49 -04008066 create_trace_instances(d_tracer);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09008067
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008068 update_tracer_options(&global_trace);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05008069
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01008070 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008071}
8072
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008073static int trace_panic_handler(struct notifier_block *this,
8074 unsigned long event, void *unused)
8075{
Steven Rostedt944ac422008-10-23 19:26:08 -04008076 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02008077 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008078 return NOTIFY_OK;
8079}
8080
8081static struct notifier_block trace_panic_notifier = {
8082 .notifier_call = trace_panic_handler,
8083 .next = NULL,
8084 .priority = 150 /* priority: INT_MAX >= x >= 0 */
8085};
8086
8087static int trace_die_handler(struct notifier_block *self,
8088 unsigned long val,
8089 void *data)
8090{
8091 switch (val) {
8092 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04008093 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02008094 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008095 break;
8096 default:
8097 break;
8098 }
8099 return NOTIFY_OK;
8100}
8101
8102static struct notifier_block trace_die_notifier = {
8103 .notifier_call = trace_die_handler,
8104 .priority = 200
8105};
8106
8107/*
8108 * printk is set to a max of 1024; we really don't need it that big.
8109 * Nothing should be printing 1000 characters anyway.
8110 */
8111#define TRACE_MAX_PRINT 1000
8112
8113/*
8114 * Define here KERN_TRACE so that we have one place to modify
8115 * it if we decide to change what log level the ftrace dump
8116 * should be at.
8117 */
Steven Rostedt428aee12009-01-14 12:24:42 -05008118#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008119
Jason Wessel955b61e2010-08-05 09:22:23 -05008120void
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008121trace_printk_seq(struct trace_seq *s)
8122{
8123 /* Probably should print a warning here. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04008124 if (s->seq.len >= TRACE_MAX_PRINT)
8125 s->seq.len = TRACE_MAX_PRINT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008126
Steven Rostedt (Red Hat)820b75f2014-11-19 10:56:41 -05008127 /*
8128 * More paranoid code. Although the buffer size is set to
8129 * PAGE_SIZE and TRACE_MAX_PRINT is 1000, this is just
8130 * an extra layer of protection.
8131 */
8132 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8133 s->seq.len = s->seq.size - 1;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008134
8135 /* should be zero terminated, but we are paranoid. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04008136 s->buffer[s->seq.len] = 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008137
8138 printk(KERN_TRACE "%s", s->buffer);
8139
Steven Rostedtf9520752009-03-02 14:04:40 -05008140 trace_seq_init(s);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008141}
8142
Jason Wessel955b61e2010-08-05 09:22:23 -05008143void trace_init_global_iter(struct trace_iterator *iter)
8144{
8145 iter->tr = &global_trace;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008146 iter->trace = iter->tr->current_trace;
Steven Rostedtae3b5092013-01-23 15:22:59 -05008147 iter->cpu_file = RING_BUFFER_ALL_CPUS;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008148 iter->trace_buffer = &global_trace.trace_buffer;
Cody P Schaferb2f974d2013-10-23 11:49:57 -07008149
8150 if (iter->trace && iter->trace->open)
8151 iter->trace->open(iter);
8152
8153 /* Annotate start of buffers if we had overruns */
8154 if (ring_buffer_overruns(iter->trace_buffer->buffer))
8155 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8156
8157 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8158 if (trace_clocks[iter->tr->clock_id].in_ns)
8159 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
Jason Wessel955b61e2010-08-05 09:22:23 -05008160}
8161
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04008162void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008163{
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008164 /* use static because iter can be a bit big for the stack */
8165 static struct trace_iterator iter;
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04008166 static atomic_t dump_running;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008167 struct trace_array *tr = &global_trace;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01008168 unsigned int old_userobj;
Steven Rostedtd7690412008-10-01 00:29:53 -04008169 unsigned long flags;
8170 int cnt = 0, cpu;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008171
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04008172 /* Only allow one dump user at a time. */
8173 if (atomic_inc_return(&dump_running) != 1) {
8174 atomic_dec(&dump_running);
8175 return;
Steven Rostedte0a413f2011-09-29 21:26:16 -04008176 }
8177
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04008178 /*
8179 * Always turn off tracing when we dump.
8180 * We don't need to show trace output of what happens
8181 * between multiple crashes.
8182 *
8183 * If the user does a sysrq-z, then they can re-enable
8184 * tracing with echo 1 > tracing_on.
8185 */
8186 tracing_off();
8187
8188 local_irq_save(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008189
Jovi Zhang38dbe0b2013-01-25 18:03:07 +08008190 /* Simulate the iterator */
Jason Wessel955b61e2010-08-05 09:22:23 -05008191 trace_init_global_iter(&iter);
8192
Steven Rostedtd7690412008-10-01 00:29:53 -04008193 for_each_tracing_cpu(cpu) {
Umesh Tiwari5e2d5ef2015-06-22 16:55:06 +05308194 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -04008195 }
8196
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008197 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01008198
Török Edwinb54d3de2008-11-22 13:28:48 +02008199 /* don't look at user memory in panic mode */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008200 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
Török Edwinb54d3de2008-11-22 13:28:48 +02008201
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02008202 switch (oops_dump_mode) {
8203 case DUMP_ALL:
Steven Rostedtae3b5092013-01-23 15:22:59 -05008204 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02008205 break;
8206 case DUMP_ORIG:
8207 iter.cpu_file = raw_smp_processor_id();
8208 break;
8209 case DUMP_NONE:
8210 goto out_enable;
8211 default:
8212 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
Steven Rostedtae3b5092013-01-23 15:22:59 -05008213 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02008214 }
8215
8216 printk(KERN_TRACE "Dumping ftrace buffer:\n");
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008217
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04008218 /* Did function tracer already get disabled? */
8219 if (ftrace_is_dead()) {
8220 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8221 printk("# MAY BE MISSING FUNCTION EVENTS\n");
8222 }
8223
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008224 /*
8225 * We need to stop all tracing on all CPUs to read
8226 * the next buffer. This is a bit expensive, but is
8227 * not done often. We fill all that we can read,
8228 * and then release the locks again.
8229 */
8230
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008231 while (!trace_empty(&iter)) {
8232
8233 if (!cnt)
8234 printk(KERN_TRACE "---------------------------------\n");
8235
8236 cnt++;
8237
8238 /* reset all but tr, trace, and overruns */
8239 memset(&iter.seq, 0,
8240 sizeof(struct trace_iterator) -
8241 offsetof(struct trace_iterator, seq));
8242 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8243 iter.pos = -1;
8244
Jason Wessel955b61e2010-08-05 09:22:23 -05008245 if (trace_find_next_entry_inc(&iter) != NULL) {
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08008246 int ret;
8247
8248 ret = print_trace_line(&iter);
8249 if (ret != TRACE_TYPE_NO_CONSUME)
8250 trace_consume(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008251 }
Steven Rostedtb892e5c2012-03-01 22:06:48 -05008252 touch_nmi_watchdog();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008253
8254 trace_printk_seq(&iter.seq);
8255 }
8256
8257 if (!cnt)
8258 printk(KERN_TRACE " (ftrace buffer empty)\n");
8259 else
8260 printk(KERN_TRACE "---------------------------------\n");
8261
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02008262 out_enable:
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008263 tr->trace_flags |= old_userobj;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01008264
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04008265 for_each_tracing_cpu(cpu) {
8266 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01008267 }
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04008268 atomic_dec(&dump_running);
Steven Rostedtcd891ae2009-04-28 11:39:34 -04008269 local_irq_restore(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008270}
Paul E. McKenneya8eecf22011-10-02 11:01:15 -07008271EXPORT_SYMBOL_GPL(ftrace_dump);
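
/*
 * Usage sketch: besides being exported, ftrace_dump() fires on an
 * oops when ftrace_dump_on_oops is set, and on the sysrq-z combo:
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */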
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01008272
Tom Zanussi7e465ba2017-09-22 14:58:20 -05008273int trace_run_command(const char *buf, int (*createfn)(int, char **))
8274{
8275 char **argv;
8276 int argc, ret;
8277
8278 argc = 0;
8279 ret = 0;
8280 argv = argv_split(GFP_KERNEL, buf, &argc);
8281 if (!argv)
8282 return -ENOMEM;
8283
8284 if (argc)
8285 ret = createfn(argc, argv);
8286
8287 argv_free(argv);
8288
8289 return ret;
8290}
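
/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * a createfn callback of the shape trace_run_command() expects. By
 * the time it runs, the input line has been split into words by
 * argv_split() above.
 */
static int example_createfn(int argc, char **argv)
{
	int i;

	/* e.g. "p:myprobe do_sys_open" arrives as two words */
	for (i = 0; i < argc; i++)
		pr_info("arg[%d] = %s\n", i, argv[i]);

	return 0;
}

/* e.g.: ret = trace_run_command("p:myprobe do_sys_open", example_createfn); */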
8291
8292#define WRITE_BUFSIZE 4096
8293
8294ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
8295 size_t count, loff_t *ppos,
8296 int (*createfn)(int, char **))
8297{
8298 char *kbuf, *buf, *tmp;
8299 int ret = 0;
8300 size_t done = 0;
8301 size_t size;
8302
8303 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
8304 if (!kbuf)
8305 return -ENOMEM;
8306
8307 while (done < count) {
8308 size = count - done;
8309
8310 if (size >= WRITE_BUFSIZE)
8311 size = WRITE_BUFSIZE - 1;
8312
8313 if (copy_from_user(kbuf, buffer + done, size)) {
8314 ret = -EFAULT;
8315 goto out;
8316 }
8317 kbuf[size] = '\0';
8318 buf = kbuf;
8319 do {
8320 tmp = strchr(buf, '\n');
8321 if (tmp) {
8322 *tmp = '\0';
8323 size = tmp - buf + 1;
8324 } else {
8325 size = strlen(buf);
8326 if (done + size < count) {
8327 if (buf != kbuf)
8328 break;
8329 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
8330 pr_warn("Line length is too long: Should be less than %d\n",
8331 WRITE_BUFSIZE - 2);
8332 ret = -EINVAL;
8333 goto out;
8334 }
8335 }
8336 done += size;
8337
8338 /* Remove comments */
8339 tmp = strchr(buf, '#');
8340
8341 if (tmp)
8342 *tmp = '\0';
8343
8344 ret = trace_run_command(buf, createfn);
8345 if (ret)
8346 goto out;
8347 buf += size;
8348
8349 } while (done < count);
8350 }
8351 ret = done;
8352
8353out:
8354 kfree(kbuf);
8355
8356 return ret;
8357}
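
/*
 * Illustrative sketch (hypothetical): wiring a tracefs file's .write
 * method to trace_parse_run_command(), reusing example_createfn()
 * from the sketch above. A single write() may then carry several
 * '\n'-separated, '#'-commented commands.
 */
static ssize_t example_probe_write(struct file *file,
				   const char __user *buffer,
				   size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       example_createfn);
}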
8358
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008359__init static int tracer_alloc_buffers(void)
8360{
Steven Rostedt73c51622009-03-11 13:42:01 -04008361 int ring_buf_size;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10308362 int ret = -ENOMEM;
8363
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04008364 /*
8365 * Make sure we don't accidentally add more trace options
8366 * than we have bits for.
8367 */
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008368 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04008369
Rusty Russell9e01c1b2009-01-01 10:12:22 +10308370 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
8371 goto out;
8372
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008373 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
Rusty Russell9e01c1b2009-01-01 10:12:22 +10308374 goto out_free_buffer_mask;
8375
Steven Rostedt07d777f2011-09-22 14:01:55 -04008376 /* Only allocate trace_printk buffers if a trace_printk exists */
8377 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
Steven Rostedt81698832012-10-11 10:15:05 -04008378 /* Must be called before global_trace.buffer is allocated */
Steven Rostedt07d777f2011-09-22 14:01:55 -04008379 trace_printk_init_buffers();
8380
Steven Rostedt73c51622009-03-11 13:42:01 -04008381 /* To save memory, keep the ring buffer size to its minimum */
8382 if (ring_buffer_expanded)
8383 ring_buf_size = trace_buf_size;
8384 else
8385 ring_buf_size = 1;
8386
Rusty Russell9e01c1b2009-01-01 10:12:22 +10308387 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008388 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008389
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008390 raw_spin_lock_init(&global_trace.start_lock);
8391
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01008392 /*
8393 * The prepare callbacks allocate some memory for the ring buffer. We
8394 * don't free the buffer if the CPU goes down. If we were to free
8395 * the buffer, then the user would lose any trace that was in the
8396 * buffer. The memory will be removed once the "instance" is removed.
8397 */
8398 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
8399 "trace/RB:prepare", trace_rb_cpu_prepare,
8400 NULL);
8401 if (ret < 0)
8402 goto out_free_cpumask;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04008403 /* Used for event triggers */
Dan Carpenter147d88e02017-08-01 14:02:01 +03008404 ret = -ENOMEM;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04008405 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
8406 if (!temp_buffer)
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01008407 goto out_rm_hp_state;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04008408
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09008409 if (trace_create_savedcmd() < 0)
8410 goto out_free_temp_buffer;
8411
Steven Rostedtab464282008-05-12 21:21:00 +02008412 /* TODO: make the number of buffers hot pluggable with CPUS */
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008413 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04008414 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
8415 WARN_ON(1);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09008416 goto out_free_savedcmd;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04008417 }
Steven Rostedta7603ff2012-08-06 16:24:11 -04008418
Steven Rostedt499e5472012-02-22 15:50:28 -05008419 if (global_trace.buffer_disabled)
8420 tracing_off();
Steven Rostedt3928a8a2008-09-29 23:02:41 -04008421
Steven Rostedte1e232c2014-02-10 23:38:46 -05008422 if (trace_boot_clock) {
8423 ret = tracing_set_clock(&global_trace, trace_boot_clock);
8424 if (ret < 0)
Joe Perchesa395d6a2016-03-22 14:28:09 -07008425 pr_warn("Trace clock %s not defined, going back to default\n",
8426 trace_boot_clock);
Steven Rostedte1e232c2014-02-10 23:38:46 -05008427 }
8428
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04008429 /*
8430 * register_tracer() might reference current_trace, so it
8431 * needs to be set before we register anything. This is
8432 * just a bootstrap of current_trace anyway.
8433 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008434 global_trace.current_trace = &nop_trace;
8435
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05008436 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8437
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05008438 ftrace_init_global_array_ops(&global_trace);
8439
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008440 init_trace_flags_index(&global_trace);
8441
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04008442 register_tracer(&nop_trace);
8443
Steven Rostedt (VMware)dbeafd02017-03-03 13:48:42 -05008444 /* Function tracing may start here (via kernel command line) */
8445 init_function_trace();
8446
Steven Rostedt60a11772008-05-12 21:20:44 +02008447 /* All seems OK, enable tracing */
8448 tracing_disabled = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04008449
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008450 atomic_notifier_chain_register(&panic_notifier_list,
8451 &trace_panic_notifier);
8452
8453 register_die_notifier(&trace_die_notifier);
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01008454
Steven Rostedtae63b31e2012-05-03 23:09:03 -04008455 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
8456
8457 INIT_LIST_HEAD(&global_trace.systems);
8458 INIT_LIST_HEAD(&global_trace.events);
8459 list_add(&global_trace.list, &ftrace_trace_arrays);
8460
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08008461 apply_trace_boot_options();
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04008462
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008463 register_snapshot_cmd();
8464
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01008465 return 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008466
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09008467out_free_savedcmd:
8468 free_saved_cmdlines_buffer(savedcmd);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04008469out_free_temp_buffer:
8470 ring_buffer_free(temp_buffer);
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01008471out_rm_hp_state:
8472 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10308473out_free_cpumask:
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008474 free_cpumask_var(global_trace.tracing_cpumask);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10308475out_free_buffer_mask:
8476 free_cpumask_var(tracing_buffer_mask);
8477out:
8478 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008479}
Steven Rostedtb2821ae2009-02-02 21:38:32 -05008480
Steven Rostedt (VMware)e725c732017-03-03 13:37:33 -05008481void __init early_trace_init(void)
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05008482{
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05008483 if (tracepoint_printk) {
8484 tracepoint_print_iter =
8485 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
8486 if (WARN_ON(!tracepoint_print_iter))
8487 tracepoint_printk = 0;
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05008488 else
8489 static_key_enable(&tracepoint_printk_key.key);
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05008490 }
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05008491 tracer_alloc_buffers();
Steven Rostedt (VMware)e725c732017-03-03 13:37:33 -05008492}
8493
8494void __init trace_init(void)
8495{
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04008496 trace_event_init();
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05008497}
8498
Steven Rostedtb2821ae2009-02-02 21:38:32 -05008499__init static int clear_boot_tracer(void)
8500{
8501 /*
8502 * The default bootup tracer name is stored in an init section buffer.
8503 * This function is called in lateinit. If we did not
8504 * find the boot tracer, then clear it out, to prevent
8505 * later registration from accessing the buffer that is
8506 * about to be freed.
8507 */
8508 if (!default_bootup_tracer)
8509 return 0;
8510
8511 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
8512 default_bootup_tracer);
8513 default_bootup_tracer = NULL;
8514
8515 return 0;
8516}
8517
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008518fs_initcall(tracer_init_tracefs);
Steven Rostedt (VMware)4bb0f0e2017-08-01 12:01:52 -04008519late_initcall_sync(clear_boot_tracer);