// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

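/*
 * Worked example (illustrative): the +500 before the divide rounds to the
 * nearest microsecond instead of truncating, so ns2usecs(1499) == 1 while
 * ns2usecs(1500) == 2.
 */
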
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

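/*
 * Illustrative usage sketch (not part of this file): a caller that wants to
 * keep a trace_array from going away while using it pairs trace_array_get()
 * with trace_array_put(); do_something_with() is a hypothetical helper:
 *
 *	if (trace_array_get(tr) == 0) {
 *		do_something_with(tr);
 *		trace_array_put(tr);
 *	}
 */
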
int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}

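/*
 * Illustrative sketch (not part of this file): fork/exit tracepoint hooks
 * are the expected callers. On fork, pass the parent as @self and the child
 * as @task; on exit, pass NULL and the exiting task:
 *
 *	trace_filter_add_remove_task(pid_list, parent, child);
 *	trace_filter_add_remove_task(pid_list, NULL, task);
 */
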
/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}

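/*
 * Illustrative sketch (not part of this file): these helpers are meant to
 * be wired into a seq_operations table. my_pid_start/my_pid_next/my_pid_stop
 * are hypothetical wrappers that take and drop whatever locking protects
 * the pid_list and then defer to trace_pid_start()/trace_pid_next():
 *
 *	static const struct seq_operations pid_seq_ops = {
 *		.start	= my_pid_start,
 *		.next	= my_pid_next,
 *		.stop	= my_pid_stop,
 *		.show	= trace_pid_show,
 *	};
 */
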
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list)
		return -ENOMEM;

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}

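/*
 * Illustrative sketch (not part of this file; the names and locking scheme
 * are assumptions): a tracefs write handler would hand the current list and
 * a slot for the new one to trace_pid_write(), then publish the result and
 * free the old list once readers are done:
 *
 *	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
 *	if (ret < 0)
 *		return ret;
 *	rcu_assign_pointer(tr->filtered_pids, pid_list);
 *	synchronize_rcu();
 *	if (filtered_pids)
 *		trace_free_pid_list(filtered_pids);
 */
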
static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway, this can be
 * configured at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (which are returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different CPU ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

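/*
 * Illustrative sketch (not part of this file): a consuming reader brackets
 * its ring buffer access with these primitives; cpu may be a single CPU or
 * RING_BUFFER_ALL_CPUS:
 *
 *	trace_access_lock(cpu);
 *	event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *	trace_access_unlock(cpu);
 */
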
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, flags, pc);
	ent->type = type;
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct ring_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

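/*
 * The pair above implements the reserve/commit pattern used throughout
 * this file: reserve space in the ring buffer, fill in the entry, then
 * commit it. A condensed sketch, mirroring __trace_puts() below:
 *
 *	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
 *					    irq_flags, pc);
 *	if (!event)
 *		return 0;
 *	entry = ring_buffer_event_data(event);
 *	entry->ip = ip;
 *	__buffer_unlock_commit(buffer, event);
 */
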
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

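/*
 * Illustrative usage note (not part of this file): callers normally reach
 * this through the trace_puts() macro, which supplies _THIS_IP_ and the
 * string size, and which picks __trace_bputs() instead when the argument
 * is a string literal:
 *
 *	trace_puts("reached the fast path\n");
 */
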
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write into the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance(struct trace_array *tr)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
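
/*
 * Illustrative usage sketch (not part of this file): a debugging hook that
 * wants to freeze the trace around a rare event might do the following,
 * where saw_the_bug() is a hypothetical condition:
 *
 *	tracing_snapshot_alloc();	(once, in sleepable context)
 *	...
 *	if (saw_the_bug())
 *		tracing_snapshot();
 */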

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
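
/*
 * Illustrative example (not part of this file): memparse() accepts K/M/G
 * suffixes, so booting with
 *
 *	trace_buf_size=4M
 *
 * on the kernel command line requests a 4 MiB per-CPU ring buffer (the
 * actual size is rounded to page size when the buffer is allocated).
 */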

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};

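/*
 * Illustrative example (not part of this file): the "name" strings above
 * are what userspace selects through the tracefs trace_clock file, e.g.:
 *
 *	echo mono > /sys/kernel/debug/tracing/trace_clock
 */
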
bool trace_clock_in_ns(struct trace_array *tr)
{
	if (trace_clocks[tr->clock_id].in_ns)
		return true;

	return false;
}

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		parser->idx = 0;

		/* only spaces were written */
		if (isspace(ch) || !ch) {
			*ppos += read;
			ret = read;
			goto out;
		}
	}

	/* read the non-space input */
	while (cnt && !isspace(ch) && ch) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch) || !ch) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
		/* Make sure the parsed string always terminates with '\0'. */
		parser->buffer[parser->idx] = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

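/*
 * Illustrative sketch (not part of this file): the typical consume loop,
 * as used by trace_pid_write() above. trace_get_user() is called until the
 * user buffer is drained, yielding one NUL-terminated token per iteration
 * in parser.buffer:
 *
 *	while (cnt > 0) {
 *		pos = 0;
 *		ret = trace_get_user(&parser, ubuf, cnt, &pos);
 *		if (ret < 0 || !trace_parser_loaded(&parser))
 *			break;
 *		read += ret;
 *		ubuf += ret;
 *		cnt -= ret;
 *		trace_parser_clear(&parser);
 *	}
 */
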
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001312#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001313/*
1314 * Copy the new maximum trace into the separate maximum-trace
1315 * structure. (this way the maximum trace is permanently saved,
Chunyu Hu5a93bae2017-10-19 14:32:33 +08001316 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001317 */
1318static void
1319__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1320{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001321 struct trace_buffer *trace_buf = &tr->trace_buffer;
1322 struct trace_buffer *max_buf = &tr->max_buffer;
1323 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1324 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001325
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001326 max_buf->cpu = cpu;
1327 max_buf->time_start = data->preempt_timestamp;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001328
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05001329 max_data->saved_latency = tr->max_latency;
Steven Rostedt8248ac02009-09-02 12:27:41 -04001330 max_data->critical_start = data->critical_start;
1331 max_data->critical_end = data->critical_end;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001332
Arnaldo Carvalho de Melo1acaa1b2010-03-05 18:23:50 -03001333 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
Steven Rostedt8248ac02009-09-02 12:27:41 -04001334 max_data->pid = tsk->pid;
Steven Rostedt (Red Hat)f17a5192013-05-30 21:10:37 -04001335 /*
1336 * If tsk == current, then use current_uid(), as that does not use
1337 * RCU. The irq tracer can be called out of RCU scope.
1338 */
1339 if (tsk == current)
1340 max_data->uid = current_uid();
1341 else
1342 max_data->uid = task_uid(tsk);
1343
Steven Rostedt8248ac02009-09-02 12:27:41 -04001344 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1345 max_data->policy = tsk->policy;
1346 max_data->rt_priority = tsk->rt_priority;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001347
1348	/* record this task's comm */
1349 tracing_record_cmdline(tsk);
1350}
1351
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001352/**
1353 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1354 * @tr: tracer
1355 * @tsk: the task with the latency
1356 * @cpu: The cpu that initiated the trace.
1357 *
1358 * Flip the buffers between the @tr and the max_tr and record information
1359 * about which task was the cause of this latency.
1360 */
Ingo Molnare309b412008-05-12 21:20:51 +02001361void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001362update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1363{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001364 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001365 return;
1366
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001367 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt34600f02013-01-22 13:35:11 -05001368
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05001369 if (!tr->allocated_snapshot) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001370 /* Only the nop tracer should hit this when disabling */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001371 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001372 return;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001373 }
Steven Rostedt34600f02013-01-22 13:35:11 -05001374
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001375 arch_spin_lock(&tr->max_lock);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001376
Masami Hiramatsu73c8d892018-07-14 01:28:15 +09001377 /* Inherit the recordable setting from trace_buffer */
1378 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1379 ring_buffer_record_on(tr->max_buffer.buffer);
1380 else
1381 ring_buffer_record_off(tr->max_buffer.buffer);
1382
Gustavo A. R. Silva08ae88f2018-02-09 11:53:16 -06001383 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001384
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001385 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001386 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001387}
1388
1389/**
1390 * update_max_tr_single - only copy one trace over, and reset the rest
1391 * @tr: tracer
1392 * @tsk: task with the latency
1393 * @cpu: the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001394 *
1395 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001396 */
Ingo Molnare309b412008-05-12 21:20:51 +02001397void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001398update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1399{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001400 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001401
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001402 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001403 return;
1404
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001405 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001406 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001407 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001408 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001409 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001410 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001411
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001412 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001413
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001414 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001415
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001416 if (ret == -EBUSY) {
1417 /*
1418 * We failed to swap the buffer due to a commit taking
1419 * place on this CPU. We fail to record, but we reset
1420 * the max trace buffer (no one writes directly to it)
1421 * and flag that it failed.
1422 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001423 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001424 "Failed to swap buffers due to commit in progress\n");
1425 }
1426
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001427 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001428
1429 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001430 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001431}
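/*
 * Sketch of how a latency tracer consumes the two helpers above. The
 * names here are illustrative; see the irqsoff/wakeup tracers for the
 * real callers. Once a new worst-case delta is measured:
 *
 *	if (delta > tr->max_latency) {
 *		tr->max_latency = delta;
 *		update_max_tr_single(tr, current, cpu);
 *	}
 *
 * update_max_tr() does the same job when every CPU buffer should be
 * flipped, not just the one that hit the latency.
 */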
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001432#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001433
Rabin Vincente30f53a2014-11-10 19:46:34 +01001434static int wait_on_pipe(struct trace_iterator *iter, bool full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001435{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001436 /* Iterators are static, they should be filled or empty */
1437 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001438 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001439
Rabin Vincente30f53a2014-11-10 19:46:34 +01001440 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1441 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001442}
1443
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001444#ifdef CONFIG_FTRACE_STARTUP_TEST
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001445static bool selftests_can_run;
1446
1447struct trace_selftests {
1448 struct list_head list;
1449 struct tracer *type;
1450};
1451
1452static LIST_HEAD(postponed_selftests);
1453
1454static int save_selftest(struct tracer *type)
1455{
1456 struct trace_selftests *selftest;
1457
1458 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1459 if (!selftest)
1460 return -ENOMEM;
1461
1462 selftest->type = type;
1463 list_add(&selftest->list, &postponed_selftests);
1464 return 0;
1465}
1466
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001467static int run_tracer_selftest(struct tracer *type)
1468{
1469 struct trace_array *tr = &global_trace;
1470 struct tracer *saved_tracer = tr->current_trace;
1471 int ret;
1472
1473 if (!type->selftest || tracing_selftest_disabled)
1474 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001475
1476 /*
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001477 * If a tracer registers early in boot up (before scheduling is
1478 * initialized and such), then do not run its selftests yet.
1479 * Instead, run it a little later in the boot process.
1480 */
1481 if (!selftests_can_run)
1482 return save_selftest(type);
1483
1484 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001485 * Run a selftest on this tracer.
1486 * Here we reset the trace buffer, and set the current
1487 * tracer to be this tracer. The tracer can then run some
1488 * internal tracing to verify that everything is in order.
1489 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001490 */
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001491 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001492
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001493 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001494
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001495#ifdef CONFIG_TRACER_MAX_TRACE
1496 if (type->use_max_tr) {
1497 /* If we expanded the buffers, make sure the max is expanded too */
1498 if (ring_buffer_expanded)
1499 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1500 RING_BUFFER_ALL_CPUS);
1501 tr->allocated_snapshot = true;
1502 }
1503#endif
1504
1505 /* the test is responsible for initializing and enabling */
1506 pr_info("Testing tracer %s: ", type->name);
1507 ret = type->selftest(type, tr);
1508 /* the test is responsible for resetting too */
1509 tr->current_trace = saved_tracer;
1510 if (ret) {
1511 printk(KERN_CONT "FAILED!\n");
1512 /* Add the warning after printing 'FAILED' */
1513 WARN_ON(1);
1514 return -1;
1515 }
1516 /* Only reset on passing, to avoid touching corrupted buffers */
1517 tracing_reset_online_cpus(&tr->trace_buffer);
1518
1519#ifdef CONFIG_TRACER_MAX_TRACE
1520 if (type->use_max_tr) {
1521 tr->allocated_snapshot = false;
1522
1523 /* Shrink the max buffer again */
1524 if (ring_buffer_expanded)
1525 ring_buffer_resize(tr->max_buffer.buffer, 1,
1526 RING_BUFFER_ALL_CPUS);
1527 }
1528#endif
1529
1530 printk(KERN_CONT "PASSED\n");
1531 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001532}
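/*
 * A tracer opts in to this machinery by filling ->selftest in its
 * struct tracer. Hypothetical sketch (compare the real tracers):
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name		= "example",
 *		.init		= example_init,
 *		.reset		= example_reset,
 *	#ifdef CONFIG_FTRACE_STARTUP_TEST
 *		.selftest	= trace_selftest_startup_example,
 *	#endif
 *	};
 */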
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001533
1534static __init int init_trace_selftests(void)
1535{
1536 struct trace_selftests *p, *n;
1537 struct tracer *t, **last;
1538 int ret;
1539
1540 selftests_can_run = true;
1541
1542 mutex_lock(&trace_types_lock);
1543
1544 if (list_empty(&postponed_selftests))
1545 goto out;
1546
1547 pr_info("Running postponed tracer tests:\n");
1548
1549 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1550 ret = run_tracer_selftest(p->type);
1551 /* If the test fails, then warn and remove from available_tracers */
1552 if (ret < 0) {
1553 WARN(1, "tracer: %s failed selftest, disabling\n",
1554 p->type->name);
1555 last = &trace_types;
1556 for (t = trace_types; t; t = t->next) {
1557 if (t == p->type) {
1558 *last = t->next;
1559 break;
1560 }
1561 last = &t->next;
1562 }
1563 }
1564 list_del(&p->list);
1565 kfree(p);
1566 }
1567
1568 out:
1569 mutex_unlock(&trace_types_lock);
1570
1571 return 0;
1572}
Steven Rostedtb9ef0322017-05-17 11:14:35 -04001573core_initcall(init_trace_selftests);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001574#else
1575static inline int run_tracer_selftest(struct tracer *type)
1576{
1577 return 0;
1578}
1579#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001580
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001581static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1582
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001583static void __init apply_trace_boot_options(void);
1584
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001585/**
1586 * register_tracer - register a tracer with the ftrace system.
1587 * @type: the plugin for the tracer
1588 *
1589 * Register a new plugin tracer.
1590 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001591int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001592{
1593 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001594 int ret = 0;
1595
1596 if (!type->name) {
1597 pr_info("Tracer must have a name\n");
1598 return -1;
1599 }
1600
Dan Carpenter24a461d2010-07-10 12:06:44 +02001601 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08001602 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1603 return -1;
1604 }
1605
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001606 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01001607
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001608 tracing_selftest_running = true;
1609
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001610 for (t = trace_types; t; t = t->next) {
1611 if (strcmp(type->name, t->name) == 0) {
1612 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08001613 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001614 type->name);
1615 ret = -1;
1616 goto out;
1617 }
1618 }
1619
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001620 if (!type->set_flag)
1621 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08001622 if (!type->flags) {
1623	/* allocate a dummy tracer_flags */
1624 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08001625 if (!type->flags) {
1626 ret = -ENOMEM;
1627 goto out;
1628 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08001629 type->flags->val = 0;
1630 type->flags->opts = dummy_tracer_opt;
1631 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001632 if (!type->flags->opts)
1633 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01001634
Chunyu Hud39cdd22016-03-08 21:37:01 +08001635 /* store the tracer for __set_tracer_option */
1636 type->flags->trace = type;
1637
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001638 ret = run_tracer_selftest(type);
1639 if (ret < 0)
1640 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02001641
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001642 type->next = trace_types;
1643 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001644 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02001645
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001646 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001647 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001648 mutex_unlock(&trace_types_lock);
1649
Steven Rostedtdac74942009-02-05 01:13:38 -05001650 if (ret || !default_bootup_tracer)
1651 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05001652
Li Zefanee6c2c12009-09-18 14:06:47 +08001653 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05001654 goto out_unlock;
1655
1656 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1657 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05001658 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05001659 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001660
1661 apply_trace_boot_options();
1662
Steven Rostedtdac74942009-02-05 01:13:38 -05001663	/* disable other selftests, since running this tracer will break them. */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05001664 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05001665#ifdef CONFIG_FTRACE_STARTUP_TEST
1666 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1667 type->name);
1668#endif
1669
1670 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001671 return ret;
1672}
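/*
 * Typical use: a tracer registers itself from its own boot-time init.
 * Since register_tracer() is __init, the call must come from __init
 * code as well (names hypothetical):
 *
 *	static __init int init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	core_initcall(init_example_tracer);
 */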
1673
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001674void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001675{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001676 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001677
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001678 if (!buffer)
1679 return;
1680
Steven Rostedtf6339032009-09-04 12:35:16 -04001681 ring_buffer_record_disable(buffer);
1682
1683 /* Make sure all commits have finished */
1684 synchronize_sched();
Steven Rostedt68179682012-05-08 20:57:53 -04001685 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001686
1687 ring_buffer_record_enable(buffer);
1688}
1689
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001690void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001691{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001692 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001693 int cpu;
1694
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001695 if (!buffer)
1696 return;
1697
Steven Rostedt621968c2009-09-04 12:02:35 -04001698 ring_buffer_record_disable(buffer);
1699
1700 /* Make sure all commits have finished */
1701 synchronize_sched();
1702
Alexander Z Lam94571582013-08-02 18:36:16 -07001703 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001704
1705 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001706 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001707
1708 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001709}
1710
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001711/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001712void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001713{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001714 struct trace_array *tr;
1715
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001716 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (VMware)065e63f2017-08-31 17:03:47 -04001717 if (!tr->clear_trace)
1718 continue;
1719 tr->clear_trace = false;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001720 tracing_reset_online_cpus(&tr->trace_buffer);
1721#ifdef CONFIG_TRACER_MAX_TRACE
1722 tracing_reset_online_cpus(&tr->max_buffer);
1723#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001724 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001725}
1726
Joel Fernandesd914ba32017-06-26 19:01:55 -07001727static int *tgid_map;
1728
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001729#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001730#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001731static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001732struct saved_cmdlines_buffer {
1733 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1734 unsigned *map_cmdline_to_pid;
1735 unsigned cmdline_num;
1736 int cmdline_idx;
1737 char *saved_cmdlines;
1738};
1739static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001740
Steven Rostedt25b0b442008-05-12 21:21:00 +02001741/* temporary disable recording */
Joel Fernandesd914ba32017-06-26 19:01:55 -07001742static atomic_t trace_record_taskinfo_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001743
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001744static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001745{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001746 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1747}
1748
1749static inline void set_cmdline(int idx, const char *cmdline)
1750{
1751 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1752}
1753
1754static int allocate_cmdlines_buffer(unsigned int val,
1755 struct saved_cmdlines_buffer *s)
1756{
Kees Cook6da2ec52018-06-12 13:55:00 -07001757 s->map_cmdline_to_pid = kmalloc_array(val,
1758 sizeof(*s->map_cmdline_to_pid),
1759 GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001760 if (!s->map_cmdline_to_pid)
1761 return -ENOMEM;
1762
Kees Cook6da2ec52018-06-12 13:55:00 -07001763 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001764 if (!s->saved_cmdlines) {
1765 kfree(s->map_cmdline_to_pid);
1766 return -ENOMEM;
1767 }
1768
1769 s->cmdline_idx = 0;
1770 s->cmdline_num = val;
1771 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1772 sizeof(s->map_pid_to_cmdline));
1773 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1774 val * sizeof(*s->map_cmdline_to_pid));
1775
1776 return 0;
1777}
1778
1779static int trace_create_savedcmd(void)
1780{
1781 int ret;
1782
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001783 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001784 if (!savedcmd)
1785 return -ENOMEM;
1786
1787 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1788 if (ret < 0) {
1789 kfree(savedcmd);
1790 savedcmd = NULL;
1791 return -ENOMEM;
1792 }
1793
1794 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001795}
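/*
 * Rough footprint of the default allocation above, assuming
 * TASK_COMM_LEN == 16 and PID_MAX_DEFAULT == 0x8000:
 *
 *	saved_cmdlines:     128 * 16 bytes            =    2 KB
 *	map_cmdline_to_pid: 128 * sizeof(unsigned)    =  512 bytes
 *	map_pid_to_cmdline: 0x8001 * sizeof(unsigned) ~= 128 KB
 *
 * The pid->cmdline map dominates and lives in the struct itself, so
 * the kmalloc of *savedcmd is by far the largest piece.
 */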
1796
Carsten Emdeb5130b12009-09-13 01:43:07 +02001797int is_tracing_stopped(void)
1798{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001799 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001800}
1801
Steven Rostedt0f048702008-11-05 16:05:44 -05001802/**
1803 * tracing_start - quick start of the tracer
1804 *
1805 * If tracing is enabled but was stopped by tracing_stop,
1806 * this will start the tracer back up.
1807 */
1808void tracing_start(void)
1809{
1810 struct ring_buffer *buffer;
1811 unsigned long flags;
1812
1813 if (tracing_disabled)
1814 return;
1815
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001816 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1817 if (--global_trace.stop_count) {
1818 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001819 /* Someone screwed up their debugging */
1820 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001821 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001822 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001823 goto out;
1824 }
1825
Steven Rostedta2f80712010-03-12 19:56:00 -05001826 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001827 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001828
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001829 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001830 if (buffer)
1831 ring_buffer_record_enable(buffer);
1832
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001833#ifdef CONFIG_TRACER_MAX_TRACE
1834 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001835 if (buffer)
1836 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001837#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001838
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001839 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001840
Steven Rostedt0f048702008-11-05 16:05:44 -05001841 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001842 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1843}
1844
1845static void tracing_start_tr(struct trace_array *tr)
1846{
1847 struct ring_buffer *buffer;
1848 unsigned long flags;
1849
1850 if (tracing_disabled)
1851 return;
1852
1853 /* If global, we need to also start the max tracer */
1854 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1855 return tracing_start();
1856
1857 raw_spin_lock_irqsave(&tr->start_lock, flags);
1858
1859 if (--tr->stop_count) {
1860 if (tr->stop_count < 0) {
1861 /* Someone screwed up their debugging */
1862 WARN_ON_ONCE(1);
1863 tr->stop_count = 0;
1864 }
1865 goto out;
1866 }
1867
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001868 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001869 if (buffer)
1870 ring_buffer_record_enable(buffer);
1871
1872 out:
1873 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001874}
1875
1876/**
1877 * tracing_stop - quick stop of the tracer
1878 *
1879 * Lightweight way to stop tracing. Use in conjunction with
1880 * tracing_start.
1881 */
1882void tracing_stop(void)
1883{
1884 struct ring_buffer *buffer;
1885 unsigned long flags;
1886
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001887 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1888 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001889 goto out;
1890
Steven Rostedta2f80712010-03-12 19:56:00 -05001891 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001892 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001893
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001894 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001895 if (buffer)
1896 ring_buffer_record_disable(buffer);
1897
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001898#ifdef CONFIG_TRACER_MAX_TRACE
1899 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001900 if (buffer)
1901 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001902#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001903
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001904 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001905
Steven Rostedt0f048702008-11-05 16:05:44 -05001906 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001907 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1908}
1909
1910static void tracing_stop_tr(struct trace_array *tr)
1911{
1912 struct ring_buffer *buffer;
1913 unsigned long flags;
1914
1915 /* If global, we need to also stop the max tracer */
1916 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1917 return tracing_stop();
1918
1919 raw_spin_lock_irqsave(&tr->start_lock, flags);
1920 if (tr->stop_count++)
1921 goto out;
1922
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001923 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001924 if (buffer)
1925 ring_buffer_record_disable(buffer);
1926
1927 out:
1928 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001929}
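/*
 * tracing_stop()/tracing_start() nest via stop_count; recording only
 * resumes once every stop has been paired with a start:
 *
 *	tracing_stop();		stop_count 0 -> 1, recording disabled
 *	tracing_stop();		stop_count 1 -> 2
 *	tracing_start();	stop_count 2 -> 1, still disabled
 *	tracing_start();	stop_count 1 -> 0, recording re-enabled
 */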
1930
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001931static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001932{
Carsten Emdea635cf02009-03-18 09:00:41 +01001933 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001934
Joel Fernandeseaf260a2017-07-06 16:00:21 -07001935 /* treat recording of idle task as a success */
1936 if (!tsk->pid)
1937 return 1;
1938
1939 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001940 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001941
1942 /*
1943 * It's not the end of the world if we don't get
1944 * the lock, but we also don't want to spin
1945 * nor do we want to disable interrupts,
1946 * so if we miss here, then better luck next time.
1947 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001948 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001949 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001950
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001951 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001952 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001953 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001954
Carsten Emdea635cf02009-03-18 09:00:41 +01001955 /*
1956 * Check whether the cmdline buffer at idx has a pid
1957 * mapped. We are going to overwrite that entry so we
1958 * need to clear the map_pid_to_cmdline. Otherwise we
1959 * would read the new comm for the old pid.
1960 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001961 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001962 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001963 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001964
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001965 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1966 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001967
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001968 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001969 }
1970
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001971 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001972
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001973 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001974
1975 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001976}
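/*
 * Worked example of the eviction above, assuming the default 128-slot
 * ring: if slot 5 currently maps pid 300 and pid 4097 is saved into
 * slot 5, map_pid_to_cmdline[300] is cleared first, so a later lookup
 * of pid 300 yields "<...>" instead of pid 4097's comm.
 */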
1977
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001978static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001979{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001980 unsigned map;
1981
Steven Rostedt4ca530852009-03-16 19:20:15 -04001982 if (!pid) {
1983 strcpy(comm, "<idle>");
1984 return;
1985 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001986
Steven Rostedt74bf4072010-01-25 15:11:53 -05001987 if (WARN_ON_ONCE(pid < 0)) {
1988 strcpy(comm, "<XXX>");
1989 return;
1990 }
1991
Steven Rostedt4ca530852009-03-16 19:20:15 -04001992 if (pid > PID_MAX_DEFAULT) {
1993 strcpy(comm, "<...>");
1994 return;
1995 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001996
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001997 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001998 if (map != NO_CMDLINE_MAP)
Amey Telawanee09e2862017-05-03 15:41:14 +05301999 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
Thomas Gleixner50d88752009-03-18 08:58:44 +01002000 else
2001 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002002}
2003
2004void trace_find_cmdline(int pid, char comm[])
2005{
2006 preempt_disable();
2007 arch_spin_lock(&trace_cmdline_lock);
2008
2009 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002010
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002011 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02002012 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002013}
2014
Joel Fernandesd914ba32017-06-26 19:01:55 -07002015int trace_find_tgid(int pid)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002016{
Joel Fernandesd914ba32017-06-26 19:01:55 -07002017 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2018 return 0;
2019
2020 return tgid_map[pid];
2021}
2022
2023static int trace_save_tgid(struct task_struct *tsk)
2024{
Joel Fernandesbd45d342017-07-06 16:00:22 -07002025 /* treat recording of idle task as a success */
2026 if (!tsk->pid)
2027 return 1;
2028
2029 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
Joel Fernandesd914ba32017-06-26 19:01:55 -07002030 return 0;
2031
2032 tgid_map[tsk->pid] = tsk->tgid;
2033 return 1;
2034}
2035
2036static bool tracing_record_taskinfo_skip(int flags)
2037{
2038 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2039 return true;
2040 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2041 return true;
2042 if (!__this_cpu_read(trace_taskinfo_save))
2043 return true;
2044 return false;
2045}
2046
2047/**
2048 * tracing_record_taskinfo - record the task info of a task
2049 *
2050 * @task: task to record
2051 * @flags: TRACE_RECORD_CMDLINE for recording comm
2052 *         TRACE_RECORD_TGID for recording tgid
2053 */
2054void tracing_record_taskinfo(struct task_struct *task, int flags)
2055{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002056 bool done;
2057
Joel Fernandesd914ba32017-06-26 19:01:55 -07002058 if (tracing_record_taskinfo_skip(flags))
2059 return;
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002060
2061 /*
2062 * Record as much task information as possible. If some fail, continue
2063 * to try to record the others.
2064 */
2065 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2066 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2067
2068 /* If recording any information failed, retry again soon. */
2069 if (!done)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002070 return;
2071
Joel Fernandesd914ba32017-06-26 19:01:55 -07002072 __this_cpu_write(trace_taskinfo_save, false);
2073}
2074
2075/**
2076 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2077 *
2078 * @prev: previous task during sched_switch
2079 * @next: next task during sched_switch
2080 * @flags: TRACE_RECORD_CMDLINE for recording comm
2081 * TRACE_RECORD_TGID for recording tgid
2082 */
2083void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2084 struct task_struct *next, int flags)
2085{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002086 bool done;
2087
Joel Fernandesd914ba32017-06-26 19:01:55 -07002088 if (tracing_record_taskinfo_skip(flags))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002089 return;
2090
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002091 /*
2092 * Record as much task information as possible. If some fail, continue
2093 * to try to record the others.
2094 */
2095 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2096 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2097 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2098 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
Joel Fernandesd914ba32017-06-26 19:01:55 -07002099
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002100 /* If recording any information failed, retry again soon. */
2101 if (!done)
Joel Fernandesd914ba32017-06-26 19:01:55 -07002102 return;
2103
2104 __this_cpu_write(trace_taskinfo_save, false);
2105}
2106
2107/* Helpers to record a specific task information */
2108void tracing_record_cmdline(struct task_struct *task)
2109{
2110 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2111}
2112
2113void tracing_record_tgid(struct task_struct *task)
2114{
2115 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002116}
2117
Steven Rostedt (VMware)af0009f2017-03-16 11:01:06 -04002118/*
2119 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2120 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2121 * simplifies those functions and keeps them in sync.
2122 */
2123enum print_line_t trace_handle_return(struct trace_seq *s)
2124{
2125 return trace_seq_has_overflowed(s) ?
2126 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2127}
2128EXPORT_SYMBOL_GPL(trace_handle_return);
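/*
 * Sketch of the pattern this helper supports in an output callback;
 * the event and function names are hypothetical, the signature is that
 * of struct trace_event_functions::trace:
 *
 *	static enum print_line_t
 *	example_trace(struct trace_iterator *iter, int flags,
 *		      struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "example event\n");
 *		return trace_handle_return(&iter->seq);
 *	}
 */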
2129
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03002130void
Steven Rostedt38697052008-10-01 13:14:09 -04002131tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2132 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002133{
2134 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002135
Steven Rostedt777e2082008-09-29 23:02:42 -04002136 entry->preempt_count = pc & 0xff;
2137 entry->pid = (tsk) ? tsk->pid : 0;
2138 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04002139#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04002140 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04002141#else
2142 TRACE_FLAG_IRQS_NOSUPPORT |
2143#endif
Peter Zijlstra7e6867b2016-03-18 16:28:04 +01002144		((pc & NMI_MASK) ? TRACE_FLAG_NMI : 0) |
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002145 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
Pavankumar Kondetic59f29c2016-12-09 21:50:17 +05302146 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02002147 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2148 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002149}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02002150EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002151
Steven Rostedte77405a2009-09-02 14:17:06 -04002152struct ring_buffer_event *
2153trace_buffer_lock_reserve(struct ring_buffer *buffer,
2154 int type,
2155 unsigned long len,
2156 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002157{
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002158 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002159}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002160
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002161DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2162DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2163static int trace_buffered_event_ref;
2164
2165/**
2166 * trace_buffered_event_enable - enable buffering events
2167 *
2168 * When events are being filtered, it is quicker to use a temporary
2169 * buffer to write the event data into if there's a likely chance
2170 * that it will not be committed. The discard of the ring buffer
2171 * is not as fast as committing, and is much slower than copying
2172 * a commit.
2173 *
2174 * When an event is to be filtered, allocate per cpu buffers to
2175 * write the event data into; if the event is then filtered and
2176 * discarded it is simply dropped, otherwise the entire event is
2177 * committed in one shot.
2178 */
2179void trace_buffered_event_enable(void)
2180{
2181 struct ring_buffer_event *event;
2182 struct page *page;
2183 int cpu;
2184
2185 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2186
2187 if (trace_buffered_event_ref++)
2188 return;
2189
2190 for_each_tracing_cpu(cpu) {
2191 page = alloc_pages_node(cpu_to_node(cpu),
2192 GFP_KERNEL | __GFP_NORETRY, 0);
2193 if (!page)
2194 goto failed;
2195
2196 event = page_address(page);
2197 memset(event, 0, sizeof(*event));
2198
2199 per_cpu(trace_buffered_event, cpu) = event;
2200
2201 preempt_disable();
2202 if (cpu == smp_processor_id() &&
2203 this_cpu_read(trace_buffered_event) !=
2204 per_cpu(trace_buffered_event, cpu))
2205 WARN_ON_ONCE(1);
2206 preempt_enable();
2207 }
2208
2209 return;
2210 failed:
2211 trace_buffered_event_disable();
2212}
2213
2214static void enable_trace_buffered_event(void *data)
2215{
2216 /* Probably not needed, but do it anyway */
2217 smp_rmb();
2218 this_cpu_dec(trace_buffered_event_cnt);
2219}
2220
2221static void disable_trace_buffered_event(void *data)
2222{
2223 this_cpu_inc(trace_buffered_event_cnt);
2224}
2225
2226/**
2227 * trace_buffered_event_disable - disable buffering events
2228 *
2229 * When a filter is removed, it is faster to not use the buffered
2230 * events, and to commit directly into the ring buffer. Free up
2231 * the temp buffers when there are no more users. This requires
2232 * special synchronization with current events.
2233 */
2234void trace_buffered_event_disable(void)
2235{
2236 int cpu;
2237
2238 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2239
2240 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2241 return;
2242
2243 if (--trace_buffered_event_ref)
2244 return;
2245
2246 preempt_disable();
2247 /* For each CPU, set the buffer as used. */
2248 smp_call_function_many(tracing_buffer_mask,
2249 disable_trace_buffered_event, NULL, 1);
2250 preempt_enable();
2251
2252 /* Wait for all current users to finish */
2253 synchronize_sched();
2254
2255 for_each_tracing_cpu(cpu) {
2256 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2257 per_cpu(trace_buffered_event, cpu) = NULL;
2258 }
2259 /*
2260 * Make sure trace_buffered_event is NULL before clearing
2261 * trace_buffered_event_cnt.
2262 */
2263 smp_wmb();
2264
2265 preempt_disable();
2266 /* Do the work on each cpu */
2267 smp_call_function_many(tracing_buffer_mask,
2268 enable_trace_buffered_event, NULL, 1);
2269 preempt_enable();
2270}
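/*
 * Both halves are reference counted and expect event_mutex to be held:
 * attaching a filter takes a reference, detaching drops it (sketch):
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();	(first user: per-cpu pages allocated)
 *	mutex_unlock(&event_mutex);
 *	...
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_disable();	(last user: pages freed)
 *	mutex_unlock(&event_mutex);
 */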
2271
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002272static struct ring_buffer *temp_buffer;
2273
Steven Rostedtef5580d2009-02-27 19:38:04 -05002274struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04002275trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002276 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002277 int type, unsigned long len,
2278 unsigned long flags, int pc)
2279{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002280 struct ring_buffer_event *entry;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002281 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002282
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002283 *current_rb = trace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002284
Tom Zanussi00b41452018-01-15 20:51:39 -06002285 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002286 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2287 (entry = this_cpu_read(trace_buffered_event))) {
2288 /* Try to use the per cpu buffer first */
2289 val = this_cpu_inc_return(trace_buffered_event_cnt);
2290 if (val == 1) {
2291 trace_event_setup(entry, type, flags, pc);
2292 entry->array[0] = len;
2293 return entry;
2294 }
2295 this_cpu_dec(trace_buffered_event_cnt);
2296 }
2297
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002298 entry = __trace_buffer_lock_reserve(*current_rb,
2299 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002300 /*
2301	 * If tracing is off, but we have triggers enabled,
2302	 * we still need to look at the event data. Use the temp_buffer
2303	 * to store the trace event for the trigger to use. It's recursion
2304	 * safe and will not be recorded anywhere.
2305 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002306 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002307 *current_rb = temp_buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002308 entry = __trace_buffer_lock_reserve(*current_rb,
2309 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002310 }
2311 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002312}
2313EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2314
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002315static DEFINE_SPINLOCK(tracepoint_iter_lock);
2316static DEFINE_MUTEX(tracepoint_printk_mutex);
2317
2318static void output_printk(struct trace_event_buffer *fbuffer)
2319{
2320 struct trace_event_call *event_call;
2321 struct trace_event *event;
2322 unsigned long flags;
2323 struct trace_iterator *iter = tracepoint_print_iter;
2324
2325 /* We should never get here if iter is NULL */
2326 if (WARN_ON_ONCE(!iter))
2327 return;
2328
2329 event_call = fbuffer->trace_file->event_call;
2330 if (!event_call || !event_call->event.funcs ||
2331 !event_call->event.funcs->trace)
2332 return;
2333
2334 event = &fbuffer->trace_file->event_call->event;
2335
2336 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2337 trace_seq_init(&iter->seq);
2338 iter->ent = fbuffer->entry;
2339 event_call->event.funcs->trace(iter, 0, event);
2340 trace_seq_putc(&iter->seq, 0);
2341 printk("%s", iter->seq.buffer);
2342
2343 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2344}
2345
2346int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2347 void __user *buffer, size_t *lenp,
2348 loff_t *ppos)
2349{
2350 int save_tracepoint_printk;
2351 int ret;
2352
2353 mutex_lock(&tracepoint_printk_mutex);
2354 save_tracepoint_printk = tracepoint_printk;
2355
2356 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2357
2358 /*
2359 * This will force exiting early, as tracepoint_printk
2360 * is always zero when tracepoint_printk_iter is not allocated
2361 */
2362 if (!tracepoint_print_iter)
2363 tracepoint_printk = 0;
2364
2365 if (save_tracepoint_printk == tracepoint_printk)
2366 goto out;
2367
2368 if (tracepoint_printk)
2369 static_key_enable(&tracepoint_printk_key.key);
2370 else
2371 static_key_disable(&tracepoint_printk_key.key);
2372
2373 out:
2374 mutex_unlock(&tracepoint_printk_mutex);
2375
2376 return ret;
2377}
2378
2379void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2380{
2381 if (static_key_false(&tracepoint_printk_key.key))
2382 output_printk(fbuffer);
2383
2384 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2385 fbuffer->event, fbuffer->entry,
2386 fbuffer->flags, fbuffer->pc);
2387}
2388EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2389
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002390/*
2391 * Skip 3:
2392 *
2393 * trace_buffer_unlock_commit_regs()
2394 * trace_event_buffer_commit()
2395 * trace_event_raw_event_xxx()
Rohit Visavalia13cf9122018-01-29 15:11:26 +05302396 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002397# define STACK_SKIP 3
2398
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002399void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2400 struct ring_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002401 struct ring_buffer_event *event,
2402 unsigned long flags, int pc,
2403 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002404{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002405 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002406
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002407 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002408 * If regs is not set, then skip the necessary functions.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002409 * Note, we can still get here via blktrace, wakeup tracer
2410 * and mmiotrace, but that's ok if they lose a function or
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002411 * two. They are not that meaningful.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002412 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002413 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002414 ftrace_trace_userstack(buffer, flags, pc);
2415}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002416
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002417/*
2418 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2419 */
2420void
2421trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2422 struct ring_buffer_event *event)
2423{
2424 __buffer_unlock_commit(buffer, event);
2425}
2426
Chunyan Zhang478409d2016-11-21 15:57:18 +08002427static void
2428trace_process_export(struct trace_export *export,
2429 struct ring_buffer_event *event)
2430{
2431 struct trace_entry *entry;
2432 unsigned int size = 0;
2433
2434 entry = ring_buffer_event_data(event);
2435 size = ring_buffer_event_length(event);
Felipe Balbia773d412017-06-02 13:20:25 +03002436 export->write(export, entry, size);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002437}
2438
2439static DEFINE_MUTEX(ftrace_export_lock);
2440
2441static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2442
2443static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2444
2445static inline void ftrace_exports_enable(void)
2446{
2447 static_branch_enable(&ftrace_exports_enabled);
2448}
2449
2450static inline void ftrace_exports_disable(void)
2451{
2452 static_branch_disable(&ftrace_exports_enabled);
2453}
2454
2455void ftrace_exports(struct ring_buffer_event *event)
2456{
2457 struct trace_export *export;
2458
2459 preempt_disable_notrace();
2460
2461 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2462 while (export) {
2463 trace_process_export(export, event);
2464 export = rcu_dereference_raw_notrace(export->next);
2465 }
2466
2467 preempt_enable_notrace();
2468}
2469
2470static inline void
2471add_trace_export(struct trace_export **list, struct trace_export *export)
2472{
2473 rcu_assign_pointer(export->next, *list);
2474 /*
2475 * We are entering export into the list but another
2476 * CPU might be walking that list. We need to make sure
2477 * the export->next pointer is valid before another CPU sees
2478 * the export pointer inserted into the list.
2479 */
2480 rcu_assign_pointer(*list, export);
2481}
2482
2483static inline int
2484rm_trace_export(struct trace_export **list, struct trace_export *export)
2485{
2486 struct trace_export **p;
2487
2488 for (p = list; *p != NULL; p = &(*p)->next)
2489 if (*p == export)
2490 break;
2491
2492 if (*p != export)
2493 return -1;
2494
2495 rcu_assign_pointer(*p, (*p)->next);
2496
2497 return 0;
2498}
2499
2500static inline void
2501add_ftrace_export(struct trace_export **list, struct trace_export *export)
2502{
2503 if (*list == NULL)
2504 ftrace_exports_enable();
2505
2506 add_trace_export(list, export);
2507}
2508
2509static inline int
2510rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2511{
2512 int ret;
2513
2514 ret = rm_trace_export(list, export);
2515 if (*list == NULL)
2516 ftrace_exports_disable();
2517
2518 return ret;
2519}
2520
2521int register_ftrace_export(struct trace_export *export)
2522{
2523 if (WARN_ON_ONCE(!export->write))
2524 return -1;
2525
2526 mutex_lock(&ftrace_export_lock);
2527
2528 add_ftrace_export(&ftrace_exports_list, export);
2529
2530 mutex_unlock(&ftrace_export_lock);
2531
2532 return 0;
2533}
2534EXPORT_SYMBOL_GPL(register_ftrace_export);
2535
2536int unregister_ftrace_export(struct trace_export *export)
2537{
2538 int ret;
2539
2540 mutex_lock(&ftrace_export_lock);
2541
2542 ret = rm_ftrace_export(&ftrace_exports_list, export);
2543
2544 mutex_unlock(&ftrace_export_lock);
2545
2546 return ret;
2547}
2548EXPORT_SYMBOL_GPL(unregister_ftrace_export);
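/*
 * Minimal exporter sketch (hypothetical): ->write receives the raw
 * binary trace entry and its length. Only .write is mandatory; .next
 * is managed by the list helpers above.
 *
 *	static void example_write(struct trace_export *export,
 *				  const void *entry, unsigned int len)
 *	{
 *		pr_debug("exported %u bytes\n", len);
 *	}
 *
 *	static struct trace_export example_export = {
 *		.write	= example_write,
 *	};
 *
 *	register_ftrace_export(&example_export);
 */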
2549
Ingo Molnare309b412008-05-12 21:20:51 +02002550void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002551trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04002552 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2553 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002554{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002555 struct trace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002556 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002557 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002558 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002559
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002560 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2561 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002562 if (!event)
2563 return;
2564 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002565 entry->ip = ip;
2566 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002567
Chunyan Zhang478409d2016-11-21 15:57:18 +08002568 if (!call_filter_check_discard(call, entry, buffer, event)) {
2569 if (static_branch_unlikely(&ftrace_exports_enabled))
2570 ftrace_exports(event);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002571 __buffer_unlock_commit(buffer, event);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002572 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002573}
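/*
 * trace_function() is what a function-tracer callback funnels into; a
 * caller does roughly the following (sketch, see trace_functions.c):
 *
 *	local_save_flags(flags);
 *	trace_function(tr, ip, parent_ip, flags, preempt_count());
 */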
2574
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002575#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002576
2577#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2578struct ftrace_stack {
2579 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2580};
2581
2582static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2583static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2584
Steven Rostedte77405a2009-09-02 14:17:06 -04002585static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05002586 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002587 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02002588{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002589 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002590 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002591 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02002592 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002593 int use_stack;
2594 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02002595
2596 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02002597 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02002598
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002599 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002600	 * Add one, for this function and the call to save_stack_trace().
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002601 * If regs is set, then these functions will not be in the way.
2602 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002603#ifndef CONFIG_UNWINDER_ORC
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002604 if (!regs)
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002605 trace.skip++;
2606#endif
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002607
2608 /*
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002609	 * Since events can happen in NMIs, there's no safe way to
2610	 * use the per-cpu ftrace_stacks. We reserve it, and if an interrupt
2611 * or NMI comes in, it will just have to use the default
2612	 * FTRACE_STACK_ENTRIES.
2613 */
2614 preempt_disable_notrace();
2615
Shan Wei82146522012-11-19 13:21:01 +08002616 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002617 /*
2618 * We don't need any atomic variables, just a barrier.
2619 * If an interrupt comes in, we don't care, because it would
2620 * have exited and put the counter back to what we want.
2621 * We just need a barrier to keep gcc from moving things
2622 * around.
2623 */
2624 barrier();
2625 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05002626 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002627 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2628
2629 if (regs)
2630 save_stack_trace_regs(regs, &trace);
2631 else
2632 save_stack_trace(&trace);
2633
2634 if (trace.nr_entries > size)
2635 size = trace.nr_entries;
2636 } else
2637 /* From now on, use_stack is a boolean */
2638 use_stack = 0;
2639
2640 size *= sizeof(unsigned long);
2641
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002642 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2643 sizeof(*entry) + size, flags, pc);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002644 if (!event)
2645 goto out;
2646 entry = ring_buffer_event_data(event);
2647
2648 memset(&entry->caller, 0, size);
2649
2650 if (use_stack)
2651 memcpy(&entry->caller, trace.entries,
2652 trace.nr_entries * sizeof(unsigned long));
2653 else {
2654 trace.max_entries = FTRACE_STACK_ENTRIES;
2655 trace.entries = entry->caller;
2656 if (regs)
2657 save_stack_trace_regs(regs, &trace);
2658 else
2659 save_stack_trace(&trace);
2660 }
2661
2662 entry->size = trace.nr_entries;
2663
Tom Zanussif306cc82013-10-24 08:34:17 -05002664 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002665 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002666
2667 out:
2668 /* Again, don't let gcc optimize things here */
2669 barrier();
Shan Wei82146522012-11-19 13:21:01 +08002670 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002671 preempt_enable_notrace();
2672
Ingo Molnarf0a920d2008-05-12 21:20:47 +02002673}
2674
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002675static inline void ftrace_trace_stack(struct trace_array *tr,
2676 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002677 unsigned long flags,
2678 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05002679{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002680 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05002681 return;
2682
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002683 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05002684}
2685
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002686void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2687 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04002688{
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04002689 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2690
2691 if (rcu_is_watching()) {
2692 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2693 return;
2694 }
2695
2696 /*
2697 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2698 * but if the above rcu_is_watching() failed, then the NMI
2699 * triggered someplace critical, and rcu_irq_enter() should
2700 * not be called from NMI.
2701 */
2702 if (unlikely(in_nmi()))
2703 return;
2704
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04002705 rcu_irq_enter_irqson();
2706 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2707 rcu_irq_exit_irqson();
Steven Rostedt38697052008-10-01 13:14:09 -04002708}
2709
Steven Rostedt03889382009-12-11 09:48:22 -05002710/**
2711 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002712 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05002713 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002714void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05002715{
2716 unsigned long flags;
2717
2718 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05002719 return;
Steven Rostedt03889382009-12-11 09:48:22 -05002720
2721 local_save_flags(flags);
2722
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002723#ifndef CONFIG_UNWINDER_ORC
2724 /* Skip 1 to skip this function. */
2725 skip++;
2726#endif
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002727 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2728 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05002729}
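/*
 * Illustrative sketch -- a standalone userspace analogue, not kernel code.
 * "skip" trims helper frames from the top of a captured backtrace, and the
 * function bumps it once more to hide itself, mirroring the non-ORC skip++
 * above. glibc's backtrace()/backtrace_symbols_fd() stand in for
 * save_stack_trace(); all names are hypothetical.
 */
#include <execinfo.h>

static void dump_stack_skip(int skip)
{
	void *frames[32];
	int n = backtrace(frames, 32);

	skip++;				/* hide dump_stack_skip() itself */
	if (skip < n)
		backtrace_symbols_fd(frames + skip, n - skip, 1 /* stdout */);
}

static void helper(void)
{
	dump_stack_skip(1);		/* also hide helper() from the output */
}

int main(void)
{
	helper();			/* output starts at main()'s frame */
	return 0;
}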
2730
Steven Rostedt91e86e52010-11-10 12:56:12 +01002731static DEFINE_PER_CPU(int, user_stack_count);
2732
Steven Rostedte77405a2009-09-02 14:17:06 -04002733void
2734ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02002735{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002736 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02002737 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02002738 struct userstack_entry *entry;
2739 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02002740
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002741 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02002742 return;
2743
Steven Rostedtb6345872010-03-12 20:03:30 -05002744 /*
2745	 * NMIs cannot handle page faults, even with fixups.
2746	 * Saving the user stack can (and often does) fault.
2747 */
2748 if (unlikely(in_nmi()))
2749 return;
2750
Steven Rostedt91e86e52010-11-10 12:56:12 +01002751 /*
2752 * prevent recursion, since the user stack tracing may
2753 * trigger other kernel events.
2754 */
2755 preempt_disable();
2756 if (__this_cpu_read(user_stack_count))
2757 goto out;
2758
2759 __this_cpu_inc(user_stack_count);
2760
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002761 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2762 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02002763 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08002764 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02002765 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02002766
Steven Rostedt48659d32009-09-11 11:36:23 -04002767 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02002768 memset(&entry->caller, 0, sizeof(entry->caller));
2769
2770 trace.nr_entries = 0;
2771 trace.max_entries = FTRACE_STACK_ENTRIES;
2772 trace.skip = 0;
2773 trace.entries = entry->caller;
2774
2775 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05002776 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002777 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002778
Li Zefan1dbd1952010-12-09 15:47:56 +08002779 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01002780 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002781 out:
2782 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02002783}
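/*
 * Illustrative sketch -- not kernel code. The user_stack_count dance above
 * is a plain re-entrancy guard: if recording the stack itself generates an
 * event, the nested call bails out instead of recursing forever. Standalone
 * analogue with a thread-local flag; hypothetical names.
 */
#include <stdio.h>

static _Thread_local int in_user_stack;

static void emit_event(void);

static void record_user_stack(void)
{
	if (in_user_stack)
		return;			/* nested entry: bail out */
	in_user_stack = 1;
	printf("recording user stack\n");
	emit_event();			/* may re-enter record_user_stack() */
	in_user_stack = 0;
}

static void emit_event(void)
{
	record_user_stack();		/* the guard cuts the recursion */
}

int main(void)
{
	record_user_stack();		/* prints exactly once */
	return 0;
}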
2784
Hannes Eder4fd27352009-02-10 19:44:12 +01002785#ifdef UNUSED
2786static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02002787{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002788 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02002789}
Hannes Eder4fd27352009-02-10 19:44:12 +01002790#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02002791
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002792#endif /* CONFIG_STACKTRACE */
2793
Steven Rostedt07d777f2011-09-22 14:01:55 -04002794/* created for use with alloc_percpu */
2795struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002796 int nesting;
2797 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04002798};
2799
2800static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002801
2802/*
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002803 * This allows for lockless recording. If we're nested too deeply, then
2804 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04002805 */
2806static char *get_trace_buf(void)
2807{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002808 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002809
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002810 if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002811 return NULL;
2812
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04002813 buffer->nesting++;
2814
2815 /* Interrupts must see nesting incremented before we use the buffer */
2816 barrier();
2817 return &buffer->buffer[buffer->nesting][0];
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002818}
2819
2820static void put_trace_buf(void)
2821{
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04002822 /* Don't let the decrement of nesting leak before this */
2823 barrier();
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002824 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002825}
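/*
 * Illustrative sketch -- not kernel code. The nesting scheme above gives
 * each interruption level (task, softirq, irq, NMI) its own slot in a small
 * array of buffers, so a nested user never scribbles over an outer one.
 * Standalone analogue where recursion stands in for interrupts; names are
 * hypothetical.
 */
#include <stdio.h>

#define MAX_NEST 4
#define BUF_SZ   64

static _Thread_local int nesting;
static _Thread_local char bufs[MAX_NEST][BUF_SZ];

static char *get_buf(void)
{
	if (nesting >= MAX_NEST)
		return NULL;		/* nested too deeply: drop the event */
	return bufs[nesting++];
}

static void put_buf(void)
{
	nesting--;
}

static void record(int depth)
{
	char *b = get_buf();

	if (!b)
		return;
	snprintf(b, BUF_SZ, "event at depth %d", depth);
	if (depth < 5)
		record(depth + 1);	/* a "nested interrupt" */
	printf("%s\n", b);		/* still intact after the nested use */
	put_buf();
}

int main(void)
{
	record(0);			/* depths 0-3 print; 4+ are dropped */
	return 0;
}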
2826
2827static int alloc_percpu_trace_buffer(void)
2828{
2829 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002830
2831 buffers = alloc_percpu(struct trace_buffer_struct);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002832 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2833 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002834
2835 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002836 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002837}
2838
Steven Rostedt81698832012-10-11 10:15:05 -04002839static int buffers_allocated;
2840
Steven Rostedt07d777f2011-09-22 14:01:55 -04002841void trace_printk_init_buffers(void)
2842{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002843 if (buffers_allocated)
2844 return;
2845
2846 if (alloc_percpu_trace_buffer())
2847 return;
2848
Steven Rostedt2184db42014-05-28 13:14:40 -04002849 /* trace_printk() is for debug use only. Don't use it in production. */
2850
Joe Perchesa395d6a2016-03-22 14:28:09 -07002851 pr_warn("\n");
2852 pr_warn("**********************************************************\n");
2853 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2854 pr_warn("** **\n");
2855 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2856 pr_warn("** **\n");
2857 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2858 pr_warn("** unsafe for production use. **\n");
2859 pr_warn("** **\n");
2860 pr_warn("** If you see this message and you are not debugging **\n");
2861 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2862 pr_warn("** **\n");
2863 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2864 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002865
Steven Rostedtb382ede62012-10-10 21:44:34 -04002866 /* Expand the buffers to set size */
2867 tracing_update_buffers();
2868
Steven Rostedt07d777f2011-09-22 14:01:55 -04002869 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002870
2871 /*
2872 * trace_printk_init_buffers() can be called by modules.
2873 * If that happens, then we need to start cmdline recording
2874 * directly here. If the global_trace.buffer is already
2875 * allocated here, then this was called by module code.
2876 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002877 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002878 tracing_start_cmdline_record();
2879}
2880
2881void trace_printk_start_comm(void)
2882{
2883 /* Start tracing comms if trace printk is set */
2884 if (!buffers_allocated)
2885 return;
2886 tracing_start_cmdline_record();
2887}
2888
2889static void trace_printk_start_stop_comm(int enabled)
2890{
2891 if (!buffers_allocated)
2892 return;
2893
2894 if (enabled)
2895 tracing_start_cmdline_record();
2896 else
2897 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002898}
2899
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002900/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002901 * trace_vbprintk - write a binary message to the tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002902 * @ip: the address of the caller
 * @fmt: the printf format of the message to record
 * @args: the va_list holding the arguments for @fmt
2903 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002904int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002905{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002906 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002907 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002908 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002909 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002910 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002911 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002912 char *tbuffer;
2913 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002914
2915 if (unlikely(tracing_selftest_running || tracing_disabled))
2916 return 0;
2917
2918 /* Don't pollute graph traces with trace_vprintk internals */
2919 pause_graph_tracing();
2920
2921 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002922 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002923
Steven Rostedt07d777f2011-09-22 14:01:55 -04002924 tbuffer = get_trace_buf();
2925 if (!tbuffer) {
2926 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002927 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002928 }
2929
2930 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2931
2932 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002933 goto out;
2934
Steven Rostedt07d777f2011-09-22 14:01:55 -04002935 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002936 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002937 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002938 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2939 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002940 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002941 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002942 entry = ring_buffer_event_data(event);
2943 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002944 entry->fmt = fmt;
2945
Steven Rostedt07d777f2011-09-22 14:01:55 -04002946 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002947 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002948 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002949 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05002950 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002951
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002952out:
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002953 put_trace_buf();
2954
2955out_nobuffer:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002956 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002957 unpause_graph_tracing();
2958
2959 return len;
2960}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002961EXPORT_SYMBOL_GPL(trace_vbprintk);
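/*
 * Illustrative sketch -- not kernel code. trace_vbprintk() above is the
 * va_list back end; callers reach it through a thin varargs front end, the
 * way trace_printk() does. Standalone analogue with vsnprintf() standing in
 * for the ring-buffer reserve/commit; hypothetical names.
 */
#include <stdarg.h>
#include <stdio.h>

static int my_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	char buf[256];
	int len = vsnprintf(buf, sizeof(buf), fmt, args);

	printf("[%#lx] %s\n", ip, buf);	/* stand-in for the buffer commit */
	return len;
}

static int my_bprintk(unsigned long ip, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = my_vbprintk(ip, fmt, ap);
	va_end(ap);
	return ret;
}

int main(void)
{
	my_bprintk(0xdeadbeefUL, "%d buffers in %s", 3, "flight");
	return 0;
}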
2962
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01002963__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002964static int
2965__trace_array_vprintk(struct ring_buffer *buffer,
2966 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002967{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002968 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002969 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002970 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002971 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002972 unsigned long flags;
2973 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002974
2975 if (tracing_disabled || tracing_selftest_running)
2976 return 0;
2977
Steven Rostedt07d777f2011-09-22 14:01:55 -04002978 /* Don't pollute graph traces with trace_vprintk internals */
2979 pause_graph_tracing();
2980
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002981 pc = preempt_count();
2982 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002983
2985 tbuffer = get_trace_buf();
2986 if (!tbuffer) {
2987 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002988 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002989 }
2990
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002991 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002992
Steven Rostedt07d777f2011-09-22 14:01:55 -04002993 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002994 size = sizeof(*entry) + len + 1;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002995 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2996 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002997 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002998 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002999 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01003000 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003001
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003002 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05003003 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003004 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003005 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003006 }
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003007
3008out:
3009 put_trace_buf();
3010
3011out_nobuffer:
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003012 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003013 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003014
3015 return len;
3016}
Steven Rostedt659372d2009-09-03 19:11:07 -04003017
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003018__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003019int trace_array_vprintk(struct trace_array *tr,
3020 unsigned long ip, const char *fmt, va_list args)
3021{
3022 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3023}
3024
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003025__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003026int trace_array_printk(struct trace_array *tr,
3027 unsigned long ip, const char *fmt, ...)
3028{
3029 int ret;
3030 va_list ap;
3031
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003032 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003033 return 0;
3034
3035 va_start(ap, fmt);
3036 ret = trace_array_vprintk(tr, ip, fmt, ap);
3037 va_end(ap);
3038 return ret;
3039}
3040
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003041__printf(3, 4)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003042int trace_array_printk_buf(struct ring_buffer *buffer,
3043 unsigned long ip, const char *fmt, ...)
3044{
3045 int ret;
3046 va_list ap;
3047
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003048 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003049 return 0;
3050
3051 va_start(ap, fmt);
3052 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3053 va_end(ap);
3054 return ret;
3055}
3056
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003057__printf(2, 0)
Steven Rostedt659372d2009-09-03 19:11:07 -04003058int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3059{
Steven Rostedta813a152009-10-09 01:41:35 -04003060 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04003061}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003062EXPORT_SYMBOL_GPL(trace_vprintk);
3063
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003064static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04003065{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003066 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3067
Steven Rostedt5a90f572008-09-03 17:42:51 -04003068 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003069 if (buf_iter)
3070 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04003071}
3072
Ingo Molnare309b412008-05-12 21:20:51 +02003073static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003074peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3075 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003076{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003077 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003078 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003079
Steven Rostedtd7690412008-10-01 00:29:53 -04003080 if (buf_iter)
3081 event = ring_buffer_iter_peek(buf_iter, ts);
3082 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003083 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003084 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04003085
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003086 if (event) {
3087 iter->ent_size = ring_buffer_event_length(event);
3088 return ring_buffer_event_data(event);
3089 }
3090 iter->ent_size = 0;
3091 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003092}
Steven Rostedtd7690412008-10-01 00:29:53 -04003093
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003094static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003095__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3096 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003097{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003098 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003099 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08003100 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003101 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003102 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003103 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003104 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003105 int cpu;
3106
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003107 /*
3108	 * If we are in a per_cpu trace file, don't bother iterating over
3109	 * all the CPUs; peek directly at that one CPU's buffer.
3110 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003111 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003112 if (ring_buffer_empty_cpu(buffer, cpu_file))
3113 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003114 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003115 if (ent_cpu)
3116 *ent_cpu = cpu_file;
3117
3118 return ent;
3119 }
3120
Steven Rostedtab464282008-05-12 21:21:00 +02003121 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003122
3123 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003124 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003125
Steven Rostedtbc21b472010-03-31 19:49:26 -04003126 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003127
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02003128 /*
3129 * Pick the entry with the smallest timestamp:
3130 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003131 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003132 next = ent;
3133 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003134 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003135 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003136 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003137 }
3138 }
3139
Steven Rostedt12b5da32012-03-27 10:43:28 -04003140 iter->ent_size = next_size;
3141
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003142 if (ent_cpu)
3143 *ent_cpu = next_cpu;
3144
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003145 if (ent_ts)
3146 *ent_ts = next_ts;
3147
Steven Rostedtbc21b472010-03-31 19:49:26 -04003148 if (missing_events)
3149 *missing_events = next_lost;
3150
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003151 return next;
3152}
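/*
 * Illustrative sketch -- not kernel code. __find_next_entry() above is a
 * k-way merge: peek at every per-CPU stream and hand back the entry with
 * the smallest timestamp. Standalone version over plain arrays (with the
 * consume step folded in); all names are hypothetical.
 */
#include <stdio.h>

struct entry  { unsigned long long ts; const char *msg; };
struct stream { const struct entry *e; int len, pos; };

static const struct entry *next_entry(struct stream *s, int nr, int *which)
{
	const struct entry *best = NULL;
	int i;

	for (i = 0; i < nr; i++) {
		const struct entry *e;

		if (s[i].pos >= s[i].len)
			continue;		/* this stream is drained */
		e = &s[i].e[s[i].pos];
		if (!best || e->ts < best->ts) {
			best = e;		/* smallest timestamp so far */
			*which = i;
		}
	}
	if (best)
		s[*which].pos++;		/* consume the winner */
	return best;
}

int main(void)
{
	static const struct entry cpu0[] = { { 10, "a" }, { 30, "c" } };
	static const struct entry cpu1[] = { { 20, "b" }, { 40, "d" } };
	struct stream s[2] = { { cpu0, 2, 0 }, { cpu1, 2, 0 } };
	const struct entry *e;
	int cpu;

	while ((e = next_entry(s, 2, &cpu)))
		printf("cpu%d ts=%llu %s\n", cpu, e->ts, e->msg);
	return 0;
}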
3153
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003154/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003155struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3156 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003157{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003158 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003159}
Ingo Molnar8c523a92008-05-12 21:20:46 +02003160
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003161/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05003162void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003163{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003164 iter->ent = __find_next_entry(iter, &iter->cpu,
3165 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003166
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003167 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003168 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003169
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003170 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003171}
3172
Ingo Molnare309b412008-05-12 21:20:51 +02003173static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003174{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003175 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003176 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003177}
3178
Ingo Molnare309b412008-05-12 21:20:51 +02003179static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003180{
3181 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003182 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003183 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003184
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003185 WARN_ON_ONCE(iter->leftover);
3186
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003187 (*pos)++;
3188
3189 /* can't go backwards */
3190 if (iter->idx > i)
3191 return NULL;
3192
3193 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05003194 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003195 else
3196 ent = iter;
3197
3198 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05003199 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003200
3201 iter->pos = *pos;
3202
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003203 return ent;
3204}
3205
Jason Wessel955b61e2010-08-05 09:22:23 -05003206void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003207{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003208 struct ring_buffer_event *event;
3209 struct ring_buffer_iter *buf_iter;
3210 unsigned long entries = 0;
3211 u64 ts;
3212
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003213 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003214
Steven Rostedt6d158a82012-06-27 20:46:14 -04003215 buf_iter = trace_buffer_iter(iter, cpu);
3216 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003217 return;
3218
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003219 ring_buffer_iter_reset(buf_iter);
3220
3221 /*
3222	 * With the max latency tracers, a reset may never have taken
3223	 * place on a cpu. This is evident when the timestamp is
3224	 * before the start of the buffer.
3225 */
3226 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003227 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003228 break;
3229 entries++;
3230 ring_buffer_read(buf_iter, NULL);
3231 }
3232
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003233 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003234}
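/*
 * Illustrative sketch -- not kernel code. The loop above counts entries
 * whose timestamp predates the buffer's time_start: they were written
 * before the last reset, so later passes must ignore them. The same
 * arithmetic over a plain array; made-up numbers.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long ts[] = { 90, 95, 120, 130, 150 };
	unsigned long long time_start = 100;	/* time of the last reset */
	unsigned long skipped = 0;
	int i, n = (int)(sizeof(ts) / sizeof(ts[0]));

	for (i = 0; i < n && ts[i] < time_start; i++)
		skipped++;		/* entry predates the reset */

	printf("skipped_entries = %lu\n", skipped);	/* prints 2 */
	return 0;
}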
3235
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003236/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003237 * The current tracer is copied to avoid taking a global lock
3238 * all around.
3239 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003240static void *s_start(struct seq_file *m, loff_t *pos)
3241{
3242 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003243 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003244 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003245 void *p = NULL;
3246 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003247 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003248
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09003249 /*
3250 * copy the tracer to avoid using a global lock all around.
3251 * iter->trace is a copy of current_trace, the pointer to the
3252 * name may be used instead of a strcmp(), as iter->trace->name
3253 * will point to the same string as current_trace->name.
3254 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003255 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003256 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3257 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003258 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003259
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003260#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003261 if (iter->snapshot && iter->trace->use_max_tr)
3262 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003263#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003264
3265 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003266 atomic_inc(&trace_record_taskinfo_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003267
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003268 if (*pos != iter->pos) {
3269 iter->ent = NULL;
3270 iter->cpu = 0;
3271 iter->idx = -1;
3272
Steven Rostedtae3b5092013-01-23 15:22:59 -05003273 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003274 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003275 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003276 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003277 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003278
Lai Jiangshanac91d852010-03-02 17:54:50 +08003279 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003280 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3281 ;
3282
3283 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003284 /*
3285 * If we overflowed the seq_file before, then we want
3286 * to just reuse the trace_seq buffer again.
3287 */
3288 if (iter->leftover)
3289 p = iter;
3290 else {
3291 l = *pos - 1;
3292 p = s_next(m, p, &l);
3293 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003294 }
3295
Lai Jiangshan4f535962009-05-18 19:35:34 +08003296 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003297 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003298 return p;
3299}
3300
3301static void s_stop(struct seq_file *m, void *p)
3302{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003303 struct trace_iterator *iter = m->private;
3304
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003305#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003306 if (iter->snapshot && iter->trace->use_max_tr)
3307 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003308#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003309
3310 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003311 atomic_dec(&trace_record_taskinfo_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003312
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003313 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08003314 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003315}
3316
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003317static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003318get_total_entries(struct trace_buffer *buf,
3319 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003320{
3321 unsigned long count;
3322 int cpu;
3323
3324 *total = 0;
3325 *entries = 0;
3326
3327 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003328 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003329 /*
3330 * If this buffer has skipped entries, then we hold all
3331 * entries for the trace and we need to ignore the
3332 * ones before the time stamp.
3333 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003334 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3335 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003336 /* total is the same as the entries */
3337 *total += count;
3338 } else
3339 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003340 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003341 *entries += count;
3342 }
3343}
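/*
 * Illustrative sketch -- not kernel code. The accounting in
 * get_total_entries() above, run on made-up numbers: a CPU that was reset
 * mid-trace reports skipped entries, which are subtracted and shadow any
 * overrun; a normal CPU adds its overrun to the "written" total.
 */
#include <stdio.h>

int main(void)
{
	unsigned long total = 0, entries = 0;
	unsigned long count, overrun, skipped;

	/* CPU 0: never reset -- overwritten events still count as written */
	count = 900; overrun = 250; skipped = 0;
	if (skipped) {
		count -= skipped;
		total += count;		/* total is the same as the entries */
	} else
		total += count + overrun;
	entries += count;

	/* CPU 1: reset mid-trace -- pre-reset entries are ignored */
	count = 400; overrun = 70; skipped = 120;
	if (skipped) {
		count -= skipped;
		total += count;
	} else
		total += count + overrun;
	entries += count;

	/* prints 1180/1430, the format used by print_event_info() */
	printf("# entries-in-buffer/entries-written: %lu/%lu\n",
	       entries, total);
	return 0;
}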
3344
Ingo Molnare309b412008-05-12 21:20:51 +02003345static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003346{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003347 seq_puts(m, "# _------=> CPU# \n"
3348 "# / _-----=> irqs-off \n"
3349 "# | / _----=> need-resched \n"
3350 "# || / _---=> hardirq/softirq \n"
3351 "# ||| / _--=> preempt-depth \n"
3352 "# |||| / delay \n"
3353 "# cmd pid ||||| time | caller \n"
3354 "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003355}
3356
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003357static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003358{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003359 unsigned long total;
3360 unsigned long entries;
3361
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003362 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003363 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3364 entries, total, num_online_cpus());
3365 seq_puts(m, "#\n");
3366}
3367
Joel Fernandes441dae82017-06-25 22:38:43 -07003368static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3369 unsigned int flags)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003370{
Joel Fernandes441dae82017-06-25 22:38:43 -07003371 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3372
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003373 print_event_info(buf, m);
Joel Fernandes441dae82017-06-25 22:38:43 -07003374
Joel Fernandes (Google)f8494fa2018-06-25 17:08:22 -07003375 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3376 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003377}
3378
Joel Fernandes441dae82017-06-25 22:38:43 -07003379static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3380 unsigned int flags)
Steven Rostedt77271ce2011-11-17 09:34:33 -05003381{
Joel Fernandes441dae82017-06-25 22:38:43 -07003382 bool tgid = flags & TRACE_ITER_RECORD_TGID;
Steven Rostedt (VMware)b11fb732017-07-11 15:43:24 -04003383 const char tgid_space[] = " ";
3384 const char space[] = " ";
Joel Fernandes441dae82017-06-25 22:38:43 -07003385
Steven Rostedt (VMware)b11fb732017-07-11 15:43:24 -04003386 seq_printf(m, "# %s _-----=> irqs-off\n",
3387 tgid ? tgid_space : space);
3388 seq_printf(m, "# %s / _----=> need-resched\n",
3389 tgid ? tgid_space : space);
3390 seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
3391 tgid ? tgid_space : space);
3392 seq_printf(m, "# %s|| / _--=> preempt-depth\n",
3393 tgid ? tgid_space : space);
3394 seq_printf(m, "# %s||| / delay\n",
3395 tgid ? tgid_space : space);
Joel Fernandes (Google)f8494fa2018-06-25 17:08:22 -07003396 seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n",
Steven Rostedt (VMware)b11fb732017-07-11 15:43:24 -04003397 tgid ? " TGID " : space);
Joel Fernandes (Google)f8494fa2018-06-25 17:08:22 -07003398 seq_printf(m, "# | | %s | |||| | |\n",
Steven Rostedt (VMware)b11fb732017-07-11 15:43:24 -04003399 tgid ? " | " : space);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003400}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003401
Jiri Olsa62b915f2010-04-02 19:01:22 +02003402void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003403print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3404{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003405 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003406 struct trace_buffer *buf = iter->trace_buffer;
3407 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003408 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003409 unsigned long entries;
3410 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003411 const char *name = "preemption";
3412
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05003413 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003414
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003415 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003416
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003417 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003418 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003419 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003420 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003421 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003422 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02003423 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003424 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02003425 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003426 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003427#if defined(CONFIG_PREEMPT_NONE)
3428 "server",
3429#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3430 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04003431#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003432 "preempt",
3433#else
3434 "unknown",
3435#endif
3436 /* These are reserved for later use */
3437 0, 0, 0, 0);
3438#ifdef CONFIG_SMP
3439 seq_printf(m, " #P:%d)\n", num_online_cpus());
3440#else
3441 seq_puts(m, ")\n");
3442#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003443 seq_puts(m, "# -----------------\n");
3444 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003445 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07003446 data->comm, data->pid,
3447 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003448 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003449 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003450
3451 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003452 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003453 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3454 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003455 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003456 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3457 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04003458 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003459 }
3460
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003461 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003462}
3463
Steven Rostedta3097202008-11-07 22:36:02 -05003464static void test_cpu_buff_start(struct trace_iterator *iter)
3465{
3466 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003467 struct trace_array *tr = iter->tr;
Steven Rostedta3097202008-11-07 22:36:02 -05003468
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003469 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003470 return;
3471
3472 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3473 return;
3474
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003475 if (cpumask_available(iter->started) &&
3476 cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05003477 return;
3478
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003479 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003480 return;
3481
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003482 if (cpumask_available(iter->started))
Sasha Levin919cd972015-09-04 12:45:56 -04003483 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003484
3485 /* Don't print started cpu buffer for the first entry of the trace */
3486 if (iter->idx > 1)
3487 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3488 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05003489}
3490
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003491static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003492{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003493 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02003494 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003495 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003496 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003497 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003498
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003499 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003500
Steven Rostedta3097202008-11-07 22:36:02 -05003501 test_cpu_buff_start(iter);
3502
Steven Rostedtf633cef2008-12-23 23:24:13 -05003503 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003504
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003505 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003506 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3507 trace_print_lat_context(iter);
3508 else
3509 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003510 }
3511
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003512 if (trace_seq_has_overflowed(s))
3513 return TRACE_TYPE_PARTIAL_LINE;
3514
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003515 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003516 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003517
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003518 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003519
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003520 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003521}
3522
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003523static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003524{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003525 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003526 struct trace_seq *s = &iter->seq;
3527 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003528 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003529
3530 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003531
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003532 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003533 trace_seq_printf(s, "%d %d %llu ",
3534 entry->pid, iter->cpu, iter->ts);
3535
3536 if (trace_seq_has_overflowed(s))
3537 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003538
Steven Rostedtf633cef2008-12-23 23:24:13 -05003539 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003540 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003541 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003542
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003543 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003544
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003545 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003546}
3547
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003548static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003549{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003550 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003551 struct trace_seq *s = &iter->seq;
3552 unsigned char newline = '\n';
3553 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003554 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003555
3556 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003557
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003558 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003559 SEQ_PUT_HEX_FIELD(s, entry->pid);
3560 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3561 SEQ_PUT_HEX_FIELD(s, iter->ts);
3562 if (trace_seq_has_overflowed(s))
3563 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003564 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003565
Steven Rostedtf633cef2008-12-23 23:24:13 -05003566 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003567 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04003568 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003569 if (ret != TRACE_TYPE_HANDLED)
3570 return ret;
3571 }
Steven Rostedt7104f302008-10-01 10:52:51 -04003572
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003573 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003574
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003575 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003576}
3577
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003578static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003579{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003580 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003581 struct trace_seq *s = &iter->seq;
3582 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003583 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003584
3585 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003586
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003587 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003588 SEQ_PUT_FIELD(s, entry->pid);
3589 SEQ_PUT_FIELD(s, iter->cpu);
3590 SEQ_PUT_FIELD(s, iter->ts);
3591 if (trace_seq_has_overflowed(s))
3592 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003593 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003594
Steven Rostedtf633cef2008-12-23 23:24:13 -05003595 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04003596 return event ? event->funcs->binary(iter, 0, event) :
3597 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003598}
3599
Jiri Olsa62b915f2010-04-02 19:01:22 +02003600int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003601{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003602 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003603 int cpu;
3604
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003605 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003606 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003607 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003608 buf_iter = trace_buffer_iter(iter, cpu);
3609 if (buf_iter) {
3610 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003611 return 0;
3612 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003613 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003614 return 0;
3615 }
3616 return 1;
3617 }
3618
Steven Rostedtab464282008-05-12 21:21:00 +02003619 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04003620 buf_iter = trace_buffer_iter(iter, cpu);
3621 if (buf_iter) {
3622 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04003623 return 0;
3624 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003625 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04003626 return 0;
3627 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003628 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003629
Frederic Weisbecker797d3712008-09-30 18:13:45 +02003630 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003631}
3632
Lai Jiangshan4f535962009-05-18 19:35:34 +08003633/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05003634enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003635{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003636 struct trace_array *tr = iter->tr;
3637 unsigned long trace_flags = tr->trace_flags;
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003638 enum print_line_t ret;
3639
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003640 if (iter->lost_events) {
3641 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3642 iter->cpu, iter->lost_events);
3643 if (trace_seq_has_overflowed(&iter->seq))
3644 return TRACE_TYPE_PARTIAL_LINE;
3645 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04003646
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003647 if (iter->trace && iter->trace->print_line) {
3648 ret = iter->trace->print_line(iter);
3649 if (ret != TRACE_TYPE_UNHANDLED)
3650 return ret;
3651 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02003652
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05003653 if (iter->ent->type == TRACE_BPUTS &&
3654 trace_flags & TRACE_ITER_PRINTK &&
3655 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3656 return trace_print_bputs_msg_only(iter);
3657
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003658 if (iter->ent->type == TRACE_BPRINT &&
3659 trace_flags & TRACE_ITER_PRINTK &&
3660 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003661 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003662
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003663 if (iter->ent->type == TRACE_PRINT &&
3664 trace_flags & TRACE_ITER_PRINTK &&
3665 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003666 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003667
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003668 if (trace_flags & TRACE_ITER_BIN)
3669 return print_bin_fmt(iter);
3670
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003671 if (trace_flags & TRACE_ITER_HEX)
3672 return print_hex_fmt(iter);
3673
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003674 if (trace_flags & TRACE_ITER_RAW)
3675 return print_raw_fmt(iter);
3676
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003677 return print_trace_fmt(iter);
3678}
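/*
 * Illustrative sketch -- not kernel code. print_trace_line() above is an
 * ordered dispatch: tracer override first, then the message-only shortcuts,
 * then binary, hex, raw, and finally the default formatter. A miniature of
 * the same shape, where earlier flag checks win; hypothetical names.
 */
#include <stdio.h>

enum { FMT_BIN = 1 << 0, FMT_HEX = 1 << 1, FMT_RAW = 1 << 2 };

static void print_line(unsigned int flags, int value)
{
	if (flags & FMT_BIN) {			/* checked first, so it wins */
		fwrite(&value, sizeof(value), 1, stdout);
		return;
	}
	if (flags & FMT_HEX) {
		printf("%08x\n", (unsigned int)value);
		return;
	}
	if (flags & FMT_RAW) {
		printf("%d\n", value);
		return;
	}
	printf("value = %d\n", value);		/* default, human-readable */
}

int main(void)
{
	print_line(FMT_HEX | FMT_RAW, 42);	/* hex wins: 0000002a */
	print_line(0, 42);			/* default: value = 42 */
	return 0;
}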
3679
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003680void trace_latency_header(struct seq_file *m)
3681{
3682 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003683 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003684
3685 /* print nothing if the buffers are empty */
3686 if (trace_empty(iter))
3687 return;
3688
3689 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3690 print_trace_header(m, iter);
3691
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003692 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003693 print_lat_help_header(m);
3694}
3695
Jiri Olsa62b915f2010-04-02 19:01:22 +02003696void trace_default_header(struct seq_file *m)
3697{
3698 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003699 struct trace_array *tr = iter->tr;
3700 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02003701
Jiri Olsaf56e7f82011-06-03 16:58:49 +02003702 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3703 return;
3704
Jiri Olsa62b915f2010-04-02 19:01:22 +02003705 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3706 /* print nothing if the buffers are empty */
3707 if (trace_empty(iter))
3708 return;
3709 print_trace_header(m, iter);
3710 if (!(trace_flags & TRACE_ITER_VERBOSE))
3711 print_lat_help_header(m);
3712 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05003713 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3714 if (trace_flags & TRACE_ITER_IRQ_INFO)
Joel Fernandes441dae82017-06-25 22:38:43 -07003715 print_func_help_header_irq(iter->trace_buffer,
3716 m, trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003717 else
Joel Fernandes441dae82017-06-25 22:38:43 -07003718 print_func_help_header(iter->trace_buffer, m,
3719 trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003720 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02003721 }
3722}
3723
Steven Rostedte0a413f2011-09-29 21:26:16 -04003724static void test_ftrace_alive(struct seq_file *m)
3725{
3726 if (!ftrace_is_dead())
3727 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003728 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3729 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04003730}
3731
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003732#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003733static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003734{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003735 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3736 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3737 "# Takes a snapshot of the main buffer.\n"
3738 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3739		    "# (Doesn't have to be '2'; works with any number that\n"
3740 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003741}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003742
3743static void show_snapshot_percpu_help(struct seq_file *m)
3744{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003745 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003746#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003747 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3748 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003749#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003750 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3751 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003752#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003753 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3754		    "# (Doesn't have to be '2'; works with any number that\n"
3755 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003756}
3757
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003758static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3759{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05003760 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003761 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003762 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003763 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003764
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003765 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003766 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3767 show_snapshot_main_help(m);
3768 else
3769 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003770}
3771#else
3772/* Should never be called */
3773static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3774#endif

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}
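/*
 * A sketch of the counterpart encoding (trace_create_cpu_file() is defined
 * elsewhere in this file, not in this excerpt): per-cpu files stash
 * "cpu + 1" in i_cdev so that a zero i_cdev can still mean "not bound to
 * any one cpu", roughly:
 *
 *	d_inode(dentry)->i_cdev = (void *)(cpu + 1);
 */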

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return tracing_disabled;
}

/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}

static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}

static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);
		struct trace_buffer *trace_buf = &tr->trace_buffer;

#ifdef CONFIG_TRACER_MAX_TRACE
		if (tr->current_trace->print_max)
			trace_buf = &tr->max_buffer;
#endif

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(trace_buf);
		else
			tracing_reset(trace_buf, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
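/*
 * In tracefs terms (an illustrative mapping; the file creation itself
 * happens elsewhere in this file): "echo > trace" opens the file for
 * write with O_TRUNC and lands in the reset path above, clearing the
 * buffer, while "cat trace" takes the FMODE_READ path and iterates the
 * buffer through __tracing_open().
 */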

/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_puts(m, t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};
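/*
 * show_traces_fops backs the "available_tracers" file (wired up elsewhere
 * in this file); t_show() above prints the usable tracers on one
 * space-separated line, e.g. (output is configuration dependent):
 *
 *	cat available_tracers
 *	function_graph function nop
 */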

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	char *mask_str;
	int len;

	len = snprintf(NULL, 0, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
	mask_str = kmalloc(len, GFP_KERNEL);
	if (!mask_str)
		return -ENOMEM;

	len = snprintf(mask_str, len, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	if (len >= count) {
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);

out_err:
	kfree(mask_str);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}
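/*
 * The mask is parsed by cpumask_parse_user(), so it is written as a hex
 * string. A usage sketch (tracefs mount point assumed):
 *
 *	echo 3 > tracing_cpumask	# trace only CPUs 0 and 1
 *	cat tracing_cpumask		# read the current mask back
 */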

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};

static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (tr->trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tracer_flags->trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		tr->trace_flags |= mask;
	else
		tr->trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_RECORD_TGID) {
		if (!tgid_map)
			tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
					   sizeof(*tgid_map),
					   GFP_KERNEL);
		if (!tgid_map) {
			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
			return -ENOMEM;
		}

		trace_event_enable_tgid_record(enabled);
	}

	if (mask == TRACE_ITER_EVENT_FORK)
		trace_event_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_FUNC_FORK)
		ftrace_pid_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK) {
		trace_printk_start_stop_comm(enabled);
		trace_printk_control(enabled);
	}

	return 0;
}

static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret;
	size_t orig_len = strlen(option);

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	ret = match_string(trace_options, -1, cmp);
	/* If no option could be set, test the specific tracer options */
	if (ret < 0)
		ret = set_tracer_option(tr, cmp, neg);
	else
		ret = set_tracer_flag(tr, 1 << ret, !neg);

	mutex_unlock(&trace_types_lock);

	/*
	 * If the first trailing whitespace is replaced with '\0' by strstrip,
	 * turn it back into a space.
	 */
	if (orig_len > strlen(option))
		option[strlen(option)] = ' ';

	return ret;
}

static void __init apply_trace_boot_options(void)
{
	char *buf = trace_boot_options_buf;
	char *option;

	while (true) {
		option = strsep(&buf, ",");

		if (!option)
			break;

		if (*option)
			trace_set_options(&global_trace, option);

		/* Put back the comma to allow this to be called again */
		if (buf)
			*(buf - 1) = ',';
	}
}
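/*
 * Example (an illustrative command line, not a default): booting with
 *
 *	trace_options=stacktrace,noirq-info
 *
 * walks this loop once per comma-separated token, setting the
 * "stacktrace" flag and clearing "irq-info" via trace_set_options().
 */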

static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};

static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	"  trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	"  current_tracer\t- function and latency tracers\n"
	"  available_tracers\t- list of configured tracers for current_tracer\n"
	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
	"  trace_clock\t\t- change the clock used to order events\n"
	"       local:   Per cpu clock but may not be synced across CPUs\n"
	"      global:   Synced across CPUs but slows tracing down.\n"
	"     counter:   Not a clock, but just an increment\n"
	"      uptime:   Jiffy counter from time of boot\n"
	"        perf:   Same clock that perf events use\n"
#ifdef CONFIG_X86_64
	"     x86-tsc:   TSC cycle counter\n"
#endif
	"\n  timestamp_mode\t- view the mode used to timestamp events\n"
	"       delta:   Delta difference against a buffer-wide timestamp\n"
	"    absolute:   Absolute (standalone) timestamp\n"
	"\n  trace_marker\t\t- Writes into this file are inserted into the kernel buffer\n"
	"\n  trace_marker_raw\t- Writes into this file are inserted as binary data into the kernel buffer\n"
	"  tracing_cpumask\t- Limit which CPUs to trace\n"
	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t  Remove sub-buffer with rmdir\n"
	"  trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t  Disable an option by prefixing the option name\n"
	"\t\t\t  with 'no'\n"
	"  saved_cmdlines_size\t- echo the number of comm-pid entries to cache\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n  available_filter_functions - list of functions that can be filtered on\n"
	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
	"\t\t\t  functions\n"
	"\t     accepts: func_full_name or glob-matching-pattern\n"
	"\t     modules: Can select a group via module\n"
	"\t      Format: :mod:<module-name>\n"
	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
	"\t    triggers: a command to perform when function is hit\n"
	"\t      Format: <function>:<trigger>[:count]\n"
	"\t     trigger: traceon, traceoff\n"
	"\t\t      enable_event:<system>:<event>\n"
	"\t\t      disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t      stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t      snapshot\n"
#endif
	"\t\t      dump\n"
	"\t\t      cpudump\n"
	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"\t     The first one will disable tracing every time do_fault is hit\n"
	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
	"\t     The first time do_trap is hit and it disables tracing, the\n"
	"\t     counter will decrement to 2. If tracing is already disabled,\n"
	"\t     the counter will not decrement. It only decrements when the\n"
	"\t     trigger did work\n"
	"\t     To remove trigger without count:\n"
	"\t       echo '!<function>:<trigger>' > set_ftrace_filter\n"
	"\t     To remove trigger with a count:\n"
	"\t       echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t    modules: Can select a group via module command :mod:\n"
	"\t    Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
	"\t\t    (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
	"\t\t\t  snapshot buffer. Read the contents for more\n"
	"\t\t\t  information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	"  stack_trace\t\t- Shows the max stack trace when active\n"
	"  stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t  Write into this file to reset the max size (trigger a\n"
	"\t\t\t  new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
	"\t\t\t  traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
#ifdef CONFIG_KPROBE_EVENTS
	"  kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
	"\t\t\t  Write into this file to define/undefine new trace events.\n"
#endif
#ifdef CONFIG_UPROBE_EVENTS
	"  uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
	"\t\t\t  Write into this file to define/undefine new trace events.\n"
#endif
#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
	"\t  accepts: event-definitions (one definition per line)\n"
	"\t   Format: p[:[<group>/]<event>] <place> [<args>]\n"
	"\t           r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
	"\t           -:[<group>/]<event>\n"
#ifdef CONFIG_KPROBE_EVENTS
	"\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
	"place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
#endif
#ifdef CONFIG_UPROBE_EVENTS
	"   place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
#endif
	"\t     args: <name>=fetcharg[:type]\n"
	"\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	"\t           $stack<index>, $stack, $retval, $comm, $arg<N>\n"
#else
	"\t           $stack<index>, $stack, $retval, $comm\n"
#endif
	"\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
	"\t           b<bit-width>@<bit-offset>/<container-size>,\n"
	"\t           <type>\\[<array-size>\\]\n"
#endif
	"  events/\t\t- Directory containing all trace event subsystems:\n"
	"    enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
	"    enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"\t\t\t  events\n"
	"    filter\t\t- If set, only events passing filter are traced\n"
	"  events/<system>/<event>/\t- Directory containing control files for\n"
	"\t\t\t  <event>:\n"
	"    enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	"    filter\t\t- If set, only events passing filter are traced\n"
	"    trigger\t\t- If set, a command to perform when event is hit\n"
	"\t    Format: <trigger>[:count][if <filter>]\n"
	"\t   trigger: traceon, traceoff\n"
	"\t            enable_event:<system>:<event>\n"
	"\t            disable_event:<system>:<event>\n"
#ifdef CONFIG_HIST_TRIGGERS
	"\t            enable_hist:<system>:<event>\n"
	"\t            disable_hist:<system>:<event>\n"
#endif
#ifdef CONFIG_STACKTRACE
	"\t\t    stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t    snapshot\n"
#endif
#ifdef CONFIG_HIST_TRIGGERS
	"\t\t    hist (see below)\n"
#endif
	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t                  events/block/block_unplug/trigger\n"
	"\t   The first disables tracing every time block_unplug is hit.\n"
	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
	"\t   Like function triggers, the counter is only decremented if it\n"
	"\t    enabled or disabled tracing.\n"
	"\t   To remove a trigger without a count:\n"
	"\t     echo '!<trigger>' > <system>/<event>/trigger\n"
	"\t   To remove a trigger with a count:\n"
	"\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
	"\t   Filters can be ignored when removing a trigger.\n"
#ifdef CONFIG_HIST_TRIGGERS
	"  hist trigger\t- If set, event hits are aggregated into a hash table\n"
	"\t    Format: hist:keys=<field1[,field2,...]>\n"
	"\t            [:values=<field1[,field2,...]>]\n"
	"\t            [:sort=<field1[,field2,...]>]\n"
	"\t            [:size=#entries]\n"
	"\t            [:pause][:continue][:clear]\n"
	"\t            [:name=histname1]\n"
	"\t            [if <filter>]\n\n"
	"\t    When a matching event is hit, an entry is added to a hash\n"
	"\t    table using the key(s) and value(s) named, and the value of a\n"
	"\t    sum called 'hitcount' is incremented. Keys and values\n"
	"\t    correspond to fields in the event's format description. Keys\n"
	"\t    can be any field, or the special string 'stacktrace'.\n"
	"\t    Compound keys consisting of up to two fields can be specified\n"
	"\t    by the 'keys' keyword. Values must correspond to numeric\n"
	"\t    fields. Sort keys consisting of up to two fields can be\n"
	"\t    specified using the 'sort' keyword. The sort direction can\n"
	"\t    be modified by appending '.descending' or '.ascending' to a\n"
	"\t    sort field. The 'size' parameter can be used to specify more\n"
	"\t    or fewer than the default 2048 entries for the hashtable size.\n"
	"\t    If a hist trigger is given a name using the 'name' parameter,\n"
	"\t    its histogram data will be shared with other triggers of the\n"
	"\t    same name, and trigger hits will update this common data.\n\n"
	"\t    Reading the 'hist' file for the event will dump the hash\n"
	"\t    table in its entirety to stdout. If there are multiple hist\n"
	"\t    triggers attached to an event, there will be a table for each\n"
	"\t    trigger in the output. The table displayed for a named\n"
	"\t    trigger will be the same as any other instance having the\n"
	"\t    same name. The default format used to display a given field\n"
	"\t    can be modified by appending any of the following modifiers\n"
	"\t    to the field name, as applicable:\n\n"
	"\t            .hex        display a number as a hex value\n"
	"\t            .sym        display an address as a symbol\n"
	"\t            .sym-offset display an address as a symbol and offset\n"
	"\t            .execname   display a common_pid as a program name\n"
	"\t            .syscall    display a syscall id as a syscall name\n"
	"\t            .log2       display log2 value rather than raw number\n"
	"\t            .usecs      display a common_timestamp in microseconds\n\n"
	"\t    The 'pause' parameter can be used to pause an existing hist\n"
	"\t    trigger or to start a hist trigger but not log any events\n"
	"\t    until told to do so. 'continue' can be used to start or\n"
	"\t    restart a paused hist trigger.\n\n"
	"\t    The 'clear' parameter will clear the contents of a running\n"
	"\t    hist trigger and leave its current paused/active state\n"
	"\t    unchanged.\n\n"
	"\t    The enable_hist and disable_hist triggers can be used to\n"
	"\t    have one event conditionally start and stop another event's\n"
	"\t    already-attached hist trigger. The syntax is analogous to\n"
	"\t    the enable_event and disable_event triggers.\n"
#endif
;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};

static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
{
	int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
		if (trace_find_tgid(*ptr))
			return ptr;
	}

	return NULL;
}

static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	if (!tgid_map)
		return NULL;

	v = &tgid_map[0];
	while (l <= *pos) {
		v = saved_tgids_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_tgids_stop(struct seq_file *m, void *v)
{
}

static int saved_tgids_show(struct seq_file *m, void *v)
{
	int pid = (int *)v - tgid_map;

	seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
	return 0;
}

static const struct seq_operations tracing_saved_tgids_seq_ops = {
	.start		= saved_tgids_start,
	.stop		= saved_tgids_stop,
	.next		= saved_tgids_next,
	.show		= saved_tgids_show,
};

static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_tgids_seq_ops);
}

static const struct file_operations tracing_saved_tgids_fops = {
	.open		= tracing_saved_tgids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
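/*
 * Reading this file only shows something after tgid recording has been
 * turned on, since tgid_map is allocated in set_tracer_flag() above.
 * A usage sketch (output values are illustrative):
 *
 *	echo 1 > options/record-tgid
 *	cat saved_tgids		# lines of "<pid> <tgid>"
 */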
4816
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004817static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04004818{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004819 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004820
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004821 if (*pos || m->count)
4822 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004823
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004824 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004825
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004826 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4827 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004828 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04004829 continue;
4830
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004831 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004832 }
4833
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004834 return NULL;
4835}
Avadh Patel69abe6a2009-04-10 16:04:48 -04004836
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004837static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4838{
4839 void *v;
4840 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04004841
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004842 preempt_disable();
4843 arch_spin_lock(&trace_cmdline_lock);
4844
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004845 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004846 while (l <= *pos) {
4847 v = saved_cmdlines_next(m, v, &l);
4848 if (!v)
4849 return NULL;
4850 }
4851
4852 return v;
4853}
4854
4855static void saved_cmdlines_stop(struct seq_file *m, void *v)
4856{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004857 arch_spin_unlock(&trace_cmdline_lock);
4858 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004859}
4860
4861static int saved_cmdlines_show(struct seq_file *m, void *v)
4862{
4863 char buf[TASK_COMM_LEN];
4864 unsigned int *pid = v;
4865
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04004866 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004867 seq_printf(m, "%d %s\n", *pid, buf);
4868 return 0;
4869}
4870
4871static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4872 .start = saved_cmdlines_start,
4873 .next = saved_cmdlines_next,
4874 .stop = saved_cmdlines_stop,
4875 .show = saved_cmdlines_show,
4876};
4877
4878static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4879{
4880 if (tracing_disabled)
4881 return -ENODEV;
4882
4883 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04004884}
4885
4886static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09004887 .open = tracing_saved_cmdlines_open,
4888 .read = seq_read,
4889 .llseek = seq_lseek,
4890 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04004891};
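
/*
 * Illustration, not part of the original file: saved_cmdlines is a plain
 * seq_file, so userspace needs nothing more than open()/read(). A minimal
 * sketch; the tracefs path is an assumption (it may also live under
 * /sys/kernel/debug/tracing):
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/tracing/saved_cmdlines", O_RDONLY);

	if (fd < 0)
		return 1;
	/* each line is "<pid> <comm>\n", produced by saved_cmdlines_show() */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		write(STDOUT_FILENO, buf, n);
	close(fd);
	return 0;
}
#endif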

static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}
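
/*
 * A note on the resize above: the replacement buffer is fully allocated
 * before the global savedcmd pointer is swapped under trace_cmdline_lock,
 * and the old buffer is freed only after the lock is dropped. The same
 * allocate/swap/free-outside-the-lock shape, reduced to a sketch with a
 * made-up payload type (all names below are hypothetical):
 */
#if 0
static struct payload *global_p;		/* hypothetical shared pointer */

static int resize_payload(unsigned int val)
{
	struct payload *new, *old;

	new = alloc_payload(val);		/* 1) allocate outside the lock */
	if (!new)
		return -ENOMEM;

	arch_spin_lock(&payload_lock);		/* 2) short critical section: swap */
	old = global_p;
	global_p = new;
	arch_spin_unlock(&payload_lock);

	free_payload(old);			/* 3) readers take the same lock,
						 *    so old is unreachable here */
	return 0;
}
#endif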

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry and at most PID_MAX_DEFAULT */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static union trace_eval_map_item *
update_eval_map(union trace_eval_map_item *ptr)
{
	if (!ptr->map.eval_string) {
		if (ptr->tail.next) {
			ptr = ptr->tail.next;
			/* Set ptr to the next real item (skip head) */
			ptr++;
		} else
			return NULL;
	}
	return ptr;
}

static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
{
	union trace_eval_map_item *ptr = v;

	/*
	 * Paranoid! If ptr points to end, we don't want to increment past it.
	 * This really should never happen.
	 */
	ptr = update_eval_map(ptr);
	if (WARN_ON_ONCE(!ptr))
		return NULL;

	ptr++;

	(*pos)++;

	ptr = update_eval_map(ptr);

	return ptr;
}

static void *eval_map_start(struct seq_file *m, loff_t *pos)
{
	union trace_eval_map_item *v;
	loff_t l = 0;

	mutex_lock(&trace_eval_mutex);

	v = trace_eval_maps;
	if (v)
		v++;

	while (v && l < *pos) {
		v = eval_map_next(m, v, &l);
	}

	return v;
}

static void eval_map_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&trace_eval_mutex);
}

static int eval_map_show(struct seq_file *m, void *v)
{
	union trace_eval_map_item *ptr = v;

	seq_printf(m, "%s %ld (%s)\n",
		   ptr->map.eval_string, ptr->map.eval_value,
		   ptr->map.system);

	return 0;
}

static const struct seq_operations tracing_eval_map_seq_ops = {
	.start		= eval_map_start,
	.next		= eval_map_next,
	.stop		= eval_map_stop,
	.show		= eval_map_show,
};

static int tracing_eval_map_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_eval_map_seq_ops);
}

static const struct file_operations tracing_eval_map_fops = {
	.open		= tracing_eval_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static inline union trace_eval_map_item *
trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
{
	/* Return the tail of the array given its head */
	return ptr + ptr->head.length + 1;
}

static void
trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
			   int len)
{
	struct trace_eval_map **stop;
	struct trace_eval_map **map;
	union trace_eval_map_item *map_array;
	union trace_eval_map_item *ptr;

	stop = start + len;

	/*
	 * The trace_eval_maps list contains the maps plus a head and a tail
	 * item, where the head holds the module and the length of the array,
	 * and the tail holds a pointer to the next list.
	 */
	map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
	if (!map_array) {
		pr_warn("Unable to allocate trace eval mapping\n");
		return;
	}

	mutex_lock(&trace_eval_mutex);

	if (!trace_eval_maps)
		trace_eval_maps = map_array;
	else {
		ptr = trace_eval_maps;
		for (;;) {
			ptr = trace_eval_jmp_to_tail(ptr);
			if (!ptr->tail.next)
				break;
			ptr = ptr->tail.next;
		}
		ptr->tail.next = map_array;
	}
	map_array->head.mod = mod;
	map_array->head.length = len;
	map_array++;

	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
		map_array->map = **map;
		map_array++;
	}
	memset(map_array, 0, sizeof(*map_array));

	mutex_unlock(&trace_eval_mutex);
}
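
/*
 * Illustration (not from the original source) of the list layout built by
 * trace_insert_eval_map_file(). Each allocation holds len + 2 items: a
 * head, len copied maps, and a zeroed tail that chains to the next
 * module's block:
 *
 *   [ head | map 0 | map 1 | ... | map len-1 | tail ]
 *      |                                        |
 *      +- head.mod, head.length                 +- tail.next -> next head
 *
 * trace_eval_jmp_to_tail() relies on this: from a head at ptr, the tail is
 * ptr + ptr->head.length + 1. A sketch of walking every map under these
 * assumptions:
 */
#if 0
	union trace_eval_map_item *ptr = trace_eval_maps;

	while (ptr) {
		union trace_eval_map_item *tail = trace_eval_jmp_to_tail(ptr);

		for (ptr++; ptr < tail; ptr++)	/* skip head, stop at tail */
			pr_info("%s -> %ld\n",
				ptr->map.eval_string, ptr->map.eval_value);
		ptr = tail->tail.next;		/* on to the next module block */
	}
#endif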

static void trace_create_eval_file(struct dentry *d_tracer)
{
	trace_create_file("eval_map", 0444, d_tracer,
			  NULL, &tracing_eval_map_fops);
}

#else /* CONFIG_TRACE_EVAL_MAP_FILE */
static inline void trace_create_eval_file(struct dentry *d_tracer) { }
static inline void trace_insert_eval_map_file(struct module *mod,
			      struct trace_eval_map **start, int len) { }
#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */

static void trace_insert_eval_map(struct module *mod,
				  struct trace_eval_map **start, int len)
{
	struct trace_eval_map **map;

	if (len <= 0)
		return;

	map = start;

	trace_event_eval_update(map, len);

	trace_insert_eval_map_file(mod, start, len);
}

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's buffer to the size of @size_buf's entries */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If the kernel or user changes the size of the ring buffer, we use
	 * the size that was given, and we can forget about expanding it
	 * later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}

static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save on memory when tracing is never used on a system with it
 * configured in, the ring buffers are set to a minimum size. Once
 * a user starts to use the tracing facility, the buffers need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}
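
/*
 * Usage sketch, not from this file: any path about to turn tracing on is
 * expected to call tracing_update_buffers() first, so the minimally sized
 * boot-time buffers are grown before they are filled:
 */
#if 0
	int ret;

	ret = tracing_update_buffers();	/* no-op once already expanded */
	if (ret < 0)
		return ret;
	/* ... safe to enable a tracer or events from here on ... */
#endif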

struct trace_option_dentry;

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	create_trace_option_files(tr, t);
}

static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	/* Some tracers won't work on the kernel command line */
	if (system_state < SYSTEM_RUNNING && t->noboot) {
		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
			t->name);
		goto out;
	}

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called with interrupts disabled,
		 * so a synchronize_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}
#endif

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = tracing_alloc_snapshot_instance(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
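
/*
 * Userspace view of the function above (sketch only; the tracefs path is
 * an assumption, not taken from this file): tracing_set_tracer() is what
 * ultimately runs when a tracer name is written to "current_tracer".
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int set_tracer(const char *name)
{
	int fd = open("/sys/kernel/tracing/current_tracer", O_WRONLY);
	int ret = -1;

	if (fd < 0)
		return -1;
	/* e.g. set_tracer("nop"); trailing whitespace is stripped by the kernel */
	if (write(fd, name, strlen(name)) == (ssize_t)strlen(name))
		ret = 0;
	close(fd);
	return ret;
}
#endif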

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}

static ssize_t
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}

static ssize_t
tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
}

static ssize_t
tracing_thresh_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	int ret;

	mutex_lock(&trace_types_lock);
	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
	if (ret < 0)
		goto out;

	if (tr->current_trace->update_thresh) {
		ret = tr->current_trace->update_thresh(tr);
		if (ret < 0)
			goto out;
	}

	ret = cnt;
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
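
/*
 * Unit note (illustrative fragment, not from this file): these files speak
 * microseconds to userspace while the kernel stores nanoseconds, hence the
 * "* 1000" in tracing_nsecs_write() and nsecs_to_usecs() on the read side:
 */
#if 0
	unsigned long usecs = 50;		/* "echo 50 > tracing_thresh" */
	unsigned long nsecs = usecs * 1000;	/* stored value: 50000ns */

	pr_info("%lu\n", nsecs / 1000);		/* read back: prints "50" */
#endif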

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}

#endif

static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	trace_seq_init(&iter->seq);
	iter->trace = tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->trace_buffer = &tr->trace_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);

	tr->current_trace->ref++;
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	/* iter->trace points at the live tracer itself; it is not ours to free */
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	tr->current_trace->ref--;

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}

static __poll_t
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	struct trace_array *tr = iter->tr;

	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return EPOLLIN | EPOLLRDNORM;

	if (tr->trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return EPOLLIN | EPOLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static __poll_t
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}

/* Must be called with iter->mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/*
		 * We block until we read something. If tracing is disabled
		 * but we have never read anything, we keep blocking; this
		 * allows a user to cat this file, and then enable tracing.
		 * But after we have read something, we give an EOF when
		 * tracing is disabled again.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, false);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;
	}

	return 1;
}
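
/*
 * The consuming side of the wait above, from userspace (sketch only; the
 * tracefs path is an assumption). A blocking read on trace_pipe parks the
 * reader in tracing_wait_pipe() until data shows up:
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);

	if (fd < 0)
		return 1;
	/* read() consumes events and blocks while the buffer is empty */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		write(STDOUT_FILENO, buf, n);
	close(fd);
	return 0;
}
#endif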

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	ssize_t sret;

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of trace coherency, the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		goto out;

	trace_seq_init(&iter->seq);

	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.seq.len = save_len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= generic_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};

static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	if (i)
		ret = splice_to_pipe(pipe, &spd);
	else
		ret = 0;
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}
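
/*
 * Userspace sketch of driving the splice path above (illustrative; the
 * tracefs path is an assumption). splice() moves whole pages from
 * trace_pipe into a pipe without bouncing through a user buffer:
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static int splice_trace(int pipe_wr_fd)
{
	int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	/* pipe_wr_fd must be the write end of a pipe(2) */
	while ((n = splice(fd, NULL, pipe_wr_fd, NULL, 65536, 0)) > 0)
		;
	close(fd);
	return n < 0 ? -1 : 0;
}
#endif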

static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
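
/*
 * Sketch (not from this file): buffer_size_kb round-trips through the two
 * functions above. Writes are in KiB, shifted up to bytes before the
 * resize; reads shift back down:
 */
#if 0
	unsigned long kb = 1408;
	unsigned long bytes = kb << 10;	/* what tracing_entries_write() resizes to */

	pr_info("%lu\n", bytes >> 10);	/* what tracing_entries_read() reports: 1408 */
#endif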

static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written; this function
	 * only exists so that "echo" into the file does not report an error.
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing? */
	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}
6080
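/*
 * Write handler for the trace_marker file: reserve a TRACE_PRINT event,
 * copy the user string straight into it (substituting "<faulted>" if the
 * copy faults), fire any triggers attached to the trace_marker file, and
 * terminate the string with a newline before committing the event.
 */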
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006081static ssize_t
6082tracing_mark_write(struct file *filp, const char __user *ubuf,
6083 size_t cnt, loff_t *fpos)
6084{
Alexander Z Lam2d716192013-07-01 15:31:24 -07006085 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04006086 struct ring_buffer_event *event;
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006087 enum event_trigger_type tt = ETT_NONE;
Steven Rostedtd696b582011-09-22 11:50:27 -04006088 struct ring_buffer *buffer;
6089 struct print_entry *entry;
6090 unsigned long irq_flags;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006091 const char faulted[] = "<faulted>";
Steven Rostedtd696b582011-09-22 11:50:27 -04006092 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006093 int size;
6094 int len;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006095
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006096/* Used in tracing_mark_raw_write() as well */
6097#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006098
Steven Rostedtc76f0692008-11-07 22:36:02 -05006099 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006100 return -EINVAL;
6101
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006102 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07006103 return -EINVAL;
6104
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006105 if (cnt > TRACE_BUF_SIZE)
6106 cnt = TRACE_BUF_SIZE;
6107
Steven Rostedtd696b582011-09-22 11:50:27 -04006108 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006109
Steven Rostedtd696b582011-09-22 11:50:27 -04006110 local_save_flags(irq_flags);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006111 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6112
6113	/* If the write is shorter than "<faulted>", keep room for that string */
6114 if (cnt < FAULTED_SIZE)
6115 size += FAULTED_SIZE - cnt;
6116
Alexander Z Lam2d716192013-07-01 15:31:24 -07006117 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006118 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6119 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006120 if (unlikely(!event))
Steven Rostedtd696b582011-09-22 11:50:27 -04006121 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006122 return -EBADF;
Steven Rostedtd696b582011-09-22 11:50:27 -04006123
6124 entry = ring_buffer_event_data(event);
6125 entry->ip = _THIS_IP_;
6126
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006127 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6128 if (len) {
6129 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6130 cnt = FAULTED_SIZE;
6131 written = -EFAULT;
Steven Rostedtd696b582011-09-22 11:50:27 -04006132 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006133 written = cnt;
6134 len = cnt;
Steven Rostedtd696b582011-09-22 11:50:27 -04006135
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006136 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6137 /* do not add \n before testing triggers, but add \0 */
6138 entry->buf[cnt] = '\0';
6139 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6140 }
6141
Steven Rostedtd696b582011-09-22 11:50:27 -04006142 if (entry->buf[cnt - 1] != '\n') {
6143 entry->buf[cnt] = '\n';
6144 entry->buf[cnt + 1] = '\0';
6145 } else
6146 entry->buf[cnt] = '\0';
6147
Steven Rostedt7ffbd482012-10-11 12:14:25 -04006148 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04006149
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006150 if (tt)
6151 event_triggers_post_call(tr->trace_marker_file, tt);
6152
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006153 if (written > 0)
6154 *fpos += written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006155
Steven Rostedtfa32e852016-07-06 15:25:08 -04006156 return written;
6157}
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006158
Steven Rostedtfa32e852016-07-06 15:25:08 -04006159/* Limit it for now to 3K (including tag) */
6160#define RAW_DATA_MAX_SIZE (1024*3)
6161
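/*
 * Write handler for the trace_marker_raw file: like trace_marker, but the
 * payload is binary and must start with an integer tag id; no newline
 * handling or trigger processing is done.
 */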
6162static ssize_t
6163tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6164 size_t cnt, loff_t *fpos)
6165{
6166 struct trace_array *tr = filp->private_data;
6167 struct ring_buffer_event *event;
6168 struct ring_buffer *buffer;
6169 struct raw_data_entry *entry;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006170 const char faulted[] = "<faulted>";
Steven Rostedtfa32e852016-07-06 15:25:08 -04006171 unsigned long irq_flags;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006172 ssize_t written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006173 int size;
6174 int len;
6175
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006176#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6177
Steven Rostedtfa32e852016-07-06 15:25:08 -04006178 if (tracing_disabled)
6179 return -EINVAL;
6180
6181 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6182 return -EINVAL;
6183
6184 /* The marker must at least have a tag id */
6185 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6186 return -EINVAL;
6187
6188 if (cnt > TRACE_BUF_SIZE)
6189 cnt = TRACE_BUF_SIZE;
6190
6191 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6192
Steven Rostedtfa32e852016-07-06 15:25:08 -04006193 local_save_flags(irq_flags);
6194 size = sizeof(*entry) + cnt;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006195 if (cnt < FAULT_SIZE_ID)
6196 size += FAULT_SIZE_ID - cnt;
6197
Steven Rostedtfa32e852016-07-06 15:25:08 -04006198 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006199 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6200 irq_flags, preempt_count());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006201 if (!event)
Steven Rostedtfa32e852016-07-06 15:25:08 -04006202 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006203 return -EBADF;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006204
6205 entry = ring_buffer_event_data(event);
6206
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006207 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6208 if (len) {
6209 entry->id = -1;
6210 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6211 written = -EFAULT;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006212 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006213 written = cnt;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006214
6215 __buffer_unlock_commit(buffer, event);
6216
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006217 if (written > 0)
6218 *fpos += written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006219
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02006220 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006221}
6222
Li Zefan13f16d22009-12-08 11:16:11 +08006223static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08006224{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006225 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08006226 int i;
6227
6228 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08006229 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08006230 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006231 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6232 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08006233 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08006234
Li Zefan13f16d22009-12-08 11:16:11 +08006235 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08006236}
6237
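/*
 * Switch the trace clock to the one named by clockstr. Both the main
 * buffer and (if present) the snapshot buffer get the new clock, and both
 * are reset, since timestamps from different clocks are not comparable.
 */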
Tom Zanussid71bd342018-01-15 20:52:07 -06006238int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08006239{
Zhaolei5079f322009-08-25 16:12:56 +08006240 int i;
6241
Zhaolei5079f322009-08-25 16:12:56 +08006242 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6243 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6244 break;
6245 }
6246 if (i == ARRAY_SIZE(trace_clocks))
6247 return -EINVAL;
6248
Zhaolei5079f322009-08-25 16:12:56 +08006249 mutex_lock(&trace_types_lock);
6250
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006251 tr->clock_id = i;
6252
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006253 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08006254
David Sharp60303ed2012-10-11 16:27:52 -07006255 /*
6256 * New clock may not be consistent with the previous clock.
6257 * Reset the buffer so that it doesn't have incomparable timestamps.
6258 */
Alexander Z Lam94571582013-08-02 18:36:16 -07006259 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006260
6261#ifdef CONFIG_TRACER_MAX_TRACE
Baohong Liu170b3b12017-09-05 16:57:19 -05006262 if (tr->max_buffer.buffer)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006263 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07006264 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006265#endif
David Sharp60303ed2012-10-11 16:27:52 -07006266
Zhaolei5079f322009-08-25 16:12:56 +08006267 mutex_unlock(&trace_types_lock);
6268
Steven Rostedte1e232c2014-02-10 23:38:46 -05006269 return 0;
6270}
6271
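/*
 * Write handler for the trace_clock file: copy in the clock name, strip
 * surrounding whitespace and hand it to tracing_set_clock().
 */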
6272static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6273 size_t cnt, loff_t *fpos)
6274{
6275 struct seq_file *m = filp->private_data;
6276 struct trace_array *tr = m->private;
6277 char buf[64];
6278 const char *clockstr;
6279 int ret;
6280
6281 if (cnt >= sizeof(buf))
6282 return -EINVAL;
6283
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08006284 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05006285 return -EFAULT;
6286
6287 buf[cnt] = 0;
6288
6289 clockstr = strstrip(buf);
6290
6291 ret = tracing_set_clock(tr, clockstr);
6292 if (ret)
6293 return ret;
6294
Zhaolei5079f322009-08-25 16:12:56 +08006295 *fpos += cnt;
6296
6297 return cnt;
6298}
6299
Li Zefan13f16d22009-12-08 11:16:11 +08006300static int tracing_clock_open(struct inode *inode, struct file *file)
6301{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006302 struct trace_array *tr = inode->i_private;
6303 int ret;
6304
Li Zefan13f16d22009-12-08 11:16:11 +08006305 if (tracing_disabled)
6306 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006307
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006308 if (trace_array_get(tr))
6309 return -ENODEV;
6310
6311 ret = single_open(file, tracing_clock_show, inode->i_private);
6312 if (ret < 0)
6313 trace_array_put(tr);
6314
6315 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08006316}
6317
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006318static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6319{
6320 struct trace_array *tr = m->private;
6321
6322 mutex_lock(&trace_types_lock);
6323
6324 if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
6325 seq_puts(m, "delta [absolute]\n");
6326 else
6327 seq_puts(m, "[delta] absolute\n");
6328
6329 mutex_unlock(&trace_types_lock);
6330
6331 return 0;
6332}
6333
6334static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6335{
6336 struct trace_array *tr = inode->i_private;
6337 int ret;
6338
6339 if (tracing_disabled)
6340 return -ENODEV;
6341
6342 if (trace_array_get(tr))
6343 return -ENODEV;
6344
6345 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6346 if (ret < 0)
6347 trace_array_put(tr);
6348
6349 return ret;
6350}
6351
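/*
 * Enable or disable absolute timestamps. time_stamp_abs_ref counts the
 * users that requested absolute mode; the buffers are switched back to
 * delta timestamps only when the last user drops its reference.
 */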
Tom Zanussi00b41452018-01-15 20:51:39 -06006352int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6353{
6354 int ret = 0;
6355
6356 mutex_lock(&trace_types_lock);
6357
6358 if (abs && tr->time_stamp_abs_ref++)
6359 goto out;
6360
6361 if (!abs) {
6362 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6363 ret = -EINVAL;
6364 goto out;
6365 }
6366
6367 if (--tr->time_stamp_abs_ref)
6368 goto out;
6369 }
6370
6371 ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
6372
6373#ifdef CONFIG_TRACER_MAX_TRACE
6374 if (tr->max_buffer.buffer)
6375 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6376#endif
6377 out:
6378 mutex_unlock(&trace_types_lock);
6379
6380 return ret;
6381}
6382
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006383struct ftrace_buffer_info {
6384 struct trace_iterator iter;
6385 void *spare;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006386 unsigned int spare_cpu;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006387 unsigned int read;
6388};
6389
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006390#ifdef CONFIG_TRACER_SNAPSHOT
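/*
 * Open handler for the snapshot file. Readers get a full trace iterator
 * over the snapshot (max) buffer; write-only opens get a stub seq_file
 * that merely carries the iterator in its private data.
 */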
6391static int tracing_snapshot_open(struct inode *inode, struct file *file)
6392{
Oleg Nesterov6484c712013-07-23 17:26:10 +02006393 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006394 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006395 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006396 int ret = 0;
6397
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006398 if (trace_array_get(tr) < 0)
6399 return -ENODEV;
6400
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006401 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02006402 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006403 if (IS_ERR(iter))
6404 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006405 } else {
6406 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006407 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006408 m = kzalloc(sizeof(*m), GFP_KERNEL);
6409 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006410 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006411 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6412 if (!iter) {
6413 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006414 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006415 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006416 ret = 0;
6417
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006418 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02006419 iter->trace_buffer = &tr->max_buffer;
6420 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006421 m->private = iter;
6422 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006423 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006424out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006425 if (ret < 0)
6426 trace_array_put(tr);
6427
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006428 return ret;
6429}
6430
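/*
 * Write handler for the snapshot file. The value written selects the
 * action: 0 frees the snapshot buffer (whole buffer only), 1 allocates it
 * if needed and swaps it with the live buffer, and any other value clears
 * the snapshot buffer without swapping.
 */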
6431static ssize_t
6432tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6433 loff_t *ppos)
6434{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006435 struct seq_file *m = filp->private_data;
6436 struct trace_iterator *iter = m->private;
6437 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006438 unsigned long val;
6439 int ret;
6440
6441 ret = tracing_update_buffers();
6442 if (ret < 0)
6443 return ret;
6444
6445 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6446 if (ret)
6447 return ret;
6448
6449 mutex_lock(&trace_types_lock);
6450
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006451 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006452 ret = -EBUSY;
6453 goto out;
6454 }
6455
6456 switch (val) {
6457 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006458 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6459 ret = -EINVAL;
6460 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006461 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04006462 if (tr->allocated_snapshot)
6463 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006464 break;
6465 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006466/* Only allow per-cpu swap if the ring buffer supports it */
6467#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6468 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6469 ret = -EINVAL;
6470 break;
6471 }
6472#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05006473 if (!tr->allocated_snapshot) {
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04006474 ret = tracing_alloc_snapshot_instance(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006475 if (ret < 0)
6476 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006477 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006478 local_irq_disable();
6479 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006480 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006481 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006482 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006483 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006484 local_irq_enable();
6485 break;
6486 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05006487 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006488 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6489 tracing_reset_online_cpus(&tr->max_buffer);
6490 else
6491 tracing_reset(&tr->max_buffer, iter->cpu_file);
6492 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006493 break;
6494 }
6495
6496 if (ret >= 0) {
6497 *ppos += cnt;
6498 ret = cnt;
6499 }
6500out:
6501 mutex_unlock(&trace_types_lock);
6502 return ret;
6503}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006504
6505static int tracing_snapshot_release(struct inode *inode, struct file *file)
6506{
6507 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006508 int ret;
6509
6510 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006511
6512 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006513 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006514
6515 /* If write only, the seq_file is just a stub */
6516 if (m)
6517 kfree(m->private);
6518 kfree(m);
6519
6520 return 0;
6521}
6522
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006523static int tracing_buffers_open(struct inode *inode, struct file *filp);
6524static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6525 size_t count, loff_t *ppos);
6526static int tracing_buffers_release(struct inode *inode, struct file *file);
6527static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6528 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6529
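/*
 * Open handler for snapshot_raw: a trace_pipe_raw style reader that is
 * pointed at the snapshot buffer instead of the live buffer.
 */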
6530static int snapshot_raw_open(struct inode *inode, struct file *filp)
6531{
6532 struct ftrace_buffer_info *info;
6533 int ret;
6534
6535 ret = tracing_buffers_open(inode, filp);
6536 if (ret < 0)
6537 return ret;
6538
6539 info = filp->private_data;
6540
6541 if (info->iter.trace->use_max_tr) {
6542 tracing_buffers_release(inode, filp);
6543 return -EBUSY;
6544 }
6545
6546 info->iter.snapshot = true;
6547 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6548
6549 return ret;
6550}
6551
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006552#endif /* CONFIG_TRACER_SNAPSHOT */
6553
6554
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006555static const struct file_operations tracing_thresh_fops = {
6556 .open = tracing_open_generic,
6557 .read = tracing_thresh_read,
6558 .write = tracing_thresh_write,
6559 .llseek = generic_file_llseek,
6560};
6561
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04006562#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006563static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006564 .open = tracing_open_generic,
6565 .read = tracing_max_lat_read,
6566 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006567 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006568};
Chen Gange428abb2015-11-10 05:15:15 +08006569#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006570
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006571static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006572 .open = tracing_open_generic,
6573 .read = tracing_set_trace_read,
6574 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006575 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006576};
6577
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006578static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006579 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006580 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006581 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006582 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006583 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006584 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02006585};
6586
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006587static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006588 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02006589 .read = tracing_entries_read,
6590 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006591 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006592 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02006593};
6594
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006595static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006596 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006597 .read = tracing_total_entries_read,
6598 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006599 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006600};
6601
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006602static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006603 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006604 .write = tracing_free_buffer_write,
6605 .release = tracing_free_buffer_release,
6606};
6607
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006608static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006609 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006610 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006611 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006612 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006613};
6614
Steven Rostedtfa32e852016-07-06 15:25:08 -04006615static const struct file_operations tracing_mark_raw_fops = {
6616 .open = tracing_open_generic_tr,
6617 .write = tracing_mark_raw_write,
6618 .llseek = generic_file_llseek,
6619 .release = tracing_release_generic_tr,
6620};
6621
Zhaolei5079f322009-08-25 16:12:56 +08006622static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08006623 .open = tracing_clock_open,
6624 .read = seq_read,
6625 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006626 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08006627 .write = tracing_clock_write,
6628};
6629
Tom Zanussi2c1ea602018-01-15 20:51:41 -06006630static const struct file_operations trace_time_stamp_mode_fops = {
6631 .open = tracing_time_stamp_mode_open,
6632 .read = seq_read,
6633 .llseek = seq_lseek,
6634 .release = tracing_single_release_tr,
6635};
6636
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006637#ifdef CONFIG_TRACER_SNAPSHOT
6638static const struct file_operations snapshot_fops = {
6639 .open = tracing_snapshot_open,
6640 .read = seq_read,
6641 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05006642 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006643 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006644};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006645
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006646static const struct file_operations snapshot_raw_fops = {
6647 .open = snapshot_raw_open,
6648 .read = tracing_buffers_read,
6649 .release = tracing_buffers_release,
6650 .splice_read = tracing_buffers_splice_read,
6651 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006652};
6653
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006654#endif /* CONFIG_TRACER_SNAPSHOT */
6655
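/*
 * Open handler for trace_pipe_raw: set up a buffer iterator over the
 * chosen CPU's live buffer, pinning both the trace array and the current
 * tracer for as long as the file stays open.
 */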
Steven Rostedt2cadf912008-12-01 22:20:19 -05006656static int tracing_buffers_open(struct inode *inode, struct file *filp)
6657{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006658 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006659 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006660 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006661
6662 if (tracing_disabled)
6663 return -ENODEV;
6664
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006665 if (trace_array_get(tr) < 0)
6666 return -ENODEV;
6667
Steven Rostedt2cadf912008-12-01 22:20:19 -05006668 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006669 if (!info) {
6670 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006671 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006672 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006673
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006674 mutex_lock(&trace_types_lock);
6675
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006676 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006677 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05006678 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006679 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006680 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006681 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006682 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006683
6684 filp->private_data = info;
6685
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006686 tr->current_trace->ref++;
6687
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006688 mutex_unlock(&trace_types_lock);
6689
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006690 ret = nonseekable_open(inode, filp);
6691 if (ret < 0)
6692 trace_array_put(tr);
6693
6694 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006695}
6696
Al Viro9dd95742017-07-03 00:42:43 -04006697static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006698tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6699{
6700 struct ftrace_buffer_info *info = filp->private_data;
6701 struct trace_iterator *iter = &info->iter;
6702
6703 return trace_poll(iter, filp, poll_table);
6704}
6705
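/*
 * Read handler for trace_pipe_raw: pull whole pages out of the ring
 * buffer into a spare page (allocated on first use) and copy them to
 * user space, blocking until data is available unless O_NONBLOCK is set.
 */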
Steven Rostedt2cadf912008-12-01 22:20:19 -05006706static ssize_t
6707tracing_buffers_read(struct file *filp, char __user *ubuf,
6708 size_t count, loff_t *ppos)
6709{
6710 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006711 struct trace_iterator *iter = &info->iter;
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04006712 ssize_t ret = 0;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006713 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006714
Steven Rostedt2dc5d122009-03-04 19:10:05 -05006715 if (!count)
6716 return 0;
6717
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006718#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006719 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6720 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006721#endif
6722
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006723 if (!info->spare) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006724 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6725 iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04006726 if (IS_ERR(info->spare)) {
6727 ret = PTR_ERR(info->spare);
6728 info->spare = NULL;
6729 } else {
6730 info->spare_cpu = iter->cpu_file;
6731 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006732 }
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006733 if (!info->spare)
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04006734 return ret;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006735
Steven Rostedt2cadf912008-12-01 22:20:19 -05006736	/* Is there data left over from a previous read? */
6737 if (info->read < PAGE_SIZE)
6738 goto read;
6739
Steven Rostedtb6273442013-02-28 13:44:11 -05006740 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006741 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006742 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006743 &info->spare,
6744 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006745 iter->cpu_file, 0);
6746 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05006747
6748 if (ret < 0) {
6749 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006750 if ((filp->f_flags & O_NONBLOCK))
6751 return -EAGAIN;
6752
Rabin Vincente30f53a2014-11-10 19:46:34 +01006753 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006754 if (ret)
6755 return ret;
6756
Steven Rostedtb6273442013-02-28 13:44:11 -05006757 goto again;
6758 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006759 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05006760 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006761
Steven Rostedt436fc282011-10-14 10:44:25 -04006762 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05006763 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05006764 size = PAGE_SIZE - info->read;
6765 if (size > count)
6766 size = count;
6767
6768 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006769 if (ret == size)
6770 return -EFAULT;
6771
Steven Rostedt2dc5d122009-03-04 19:10:05 -05006772 size -= ret;
6773
Steven Rostedt2cadf912008-12-01 22:20:19 -05006774 *ppos += size;
6775 info->read += size;
6776
6777 return size;
6778}
6779
6780static int tracing_buffers_release(struct inode *inode, struct file *file)
6781{
6782 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006783 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006784
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006785 mutex_lock(&trace_types_lock);
6786
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006787 iter->tr->current_trace->ref--;
6788
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04006789 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006790
Lai Jiangshanddd538f2009-04-02 15:16:59 +08006791 if (info->spare)
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006792 ring_buffer_free_read_page(iter->trace_buffer->buffer,
6793 info->spare_cpu, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006794 kfree(info);
6795
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006796 mutex_unlock(&trace_types_lock);
6797
Steven Rostedt2cadf912008-12-01 22:20:19 -05006798 return 0;
6799}
6800
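/*
 * A buffer_ref is a reference-counted handle to a ring buffer page that
 * has been handed to a pipe by splice; the page is returned to the ring
 * buffer when the last reference is dropped.
 */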
6801struct buffer_ref {
6802 struct ring_buffer *buffer;
6803 void *page;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006804 int cpu;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006805 int ref;
6806};
6807
6808static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6809 struct pipe_buffer *buf)
6810{
6811 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6812
6813 if (--ref->ref)
6814 return;
6815
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006816 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006817 kfree(ref);
6818 buf->private = 0;
6819}
6820
Steven Rostedt2cadf912008-12-01 22:20:19 -05006821static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6822 struct pipe_buffer *buf)
6823{
6824 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6825
6826 ref->ref++;
6827}
6828
6829/* Pipe buffer operations for a ring buffer page. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08006830static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05006831 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006832 .confirm = generic_pipe_buf_confirm,
6833 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09006834 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006835 .get = buffer_pipe_buf_get,
6836};
6837
6838/*
6839 * Callback from splice_to_pipe(): release the pages still held in the
6840 * spd if we errored out while filling the pipe.
6841 */
6842static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6843{
6844 struct buffer_ref *ref =
6845 (struct buffer_ref *)spd->partial[i].private;
6846
6847 if (--ref->ref)
6848 return;
6849
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006850 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006851 kfree(ref);
6852 spd->partial[i].private = 0;
6853}
6854
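/*
 * Splice ring buffer pages into a pipe without copying. Both *ppos and
 * len must be page aligned; len is rounded down to a multiple of the
 * page size and must cover at least one page.
 */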
6855static ssize_t
6856tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6857 struct pipe_inode_info *pipe, size_t len,
6858 unsigned int flags)
6859{
6860 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006861 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02006862 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6863 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05006864 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02006865 .pages = pages_def,
6866 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02006867 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006868 .ops = &buffer_pipe_buf_ops,
6869 .spd_release = buffer_spd_release,
6870 };
6871 struct buffer_ref *ref;
Steven Rostedt (VMware)6b7e6332017-12-22 20:38:57 -05006872 int entries, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01006873 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006874
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006875#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006876 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6877 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006878#endif
6879
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006880 if (*ppos & (PAGE_SIZE - 1))
6881 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08006882
6883 if (len & (PAGE_SIZE - 1)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006884 if (len < PAGE_SIZE)
6885 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08006886 len &= PAGE_MASK;
6887 }
6888
Al Viro1ae22932016-09-17 18:31:46 -04006889 if (splice_grow_spd(pipe, &spd))
6890 return -ENOMEM;
6891
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006892 again:
6893 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006894 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04006895
Al Viroa786c062014-04-11 12:01:03 -04006896 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05006897 struct page *page;
6898 int r;
6899
6900 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01006901 if (!ref) {
6902 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006903 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01006904 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05006905
Steven Rostedt7267fa62009-04-29 00:16:21 -04006906 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006907 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006908 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04006909 if (IS_ERR(ref->page)) {
6910 ret = PTR_ERR(ref->page);
6911 ref->page = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006912 kfree(ref);
6913 break;
6914 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006915 ref->cpu = iter->cpu_file;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006916
6917 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006918 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006919 if (r < 0) {
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04006920 ring_buffer_free_read_page(ref->buffer, ref->cpu,
6921 ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006922 kfree(ref);
6923 break;
6924 }
6925
Steven Rostedt2cadf912008-12-01 22:20:19 -05006926 page = virt_to_page(ref->page);
6927
6928 spd.pages[i] = page;
6929 spd.partial[i].len = PAGE_SIZE;
6930 spd.partial[i].offset = 0;
6931 spd.partial[i].private = (unsigned long)ref;
6932 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08006933 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04006934
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006935 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006936 }
6937
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006938 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05006939 spd.nr_pages = i;
6940
6941 /* did we read anything? */
6942 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01006943 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04006944 goto out;
Rabin Vincent07906da2014-11-06 22:26:07 +01006945
Al Viro1ae22932016-09-17 18:31:46 -04006946 ret = -EAGAIN;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006947 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
Al Viro1ae22932016-09-17 18:31:46 -04006948 goto out;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006949
Rabin Vincente30f53a2014-11-10 19:46:34 +01006950 ret = wait_on_pipe(iter, true);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006951 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04006952 goto out;
Rabin Vincente30f53a2014-11-10 19:46:34 +01006953
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006954 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05006955 }
6956
6957 ret = splice_to_pipe(pipe, &spd);
Al Viro1ae22932016-09-17 18:31:46 -04006958out:
Eric Dumazet047fe362012-06-12 15:24:40 +02006959 splice_shrink_spd(&spd);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006960
Steven Rostedt2cadf912008-12-01 22:20:19 -05006961 return ret;
6962}
6963
6964static const struct file_operations tracing_buffers_fops = {
6965 .open = tracing_buffers_open,
6966 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006967 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05006968 .release = tracing_buffers_release,
6969 .splice_read = tracing_buffers_splice_read,
6970 .llseek = no_llseek,
6971};
6972
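/*
 * Read handler for the per-CPU stats file: reports the entry, overrun,
 * commit-overrun, byte, dropped-event and read-event counts of one CPU's
 * buffer, plus the oldest and current timestamps formatted to match the
 * trace clock in use.
 */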
Steven Rostedtc8d77182009-04-29 18:03:45 -04006973static ssize_t
6974tracing_stats_read(struct file *filp, char __user *ubuf,
6975 size_t count, loff_t *ppos)
6976{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006977 struct inode *inode = file_inode(filp);
6978 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006979 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006980 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006981 struct trace_seq *s;
6982 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006983 unsigned long long t;
6984 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04006985
Li Zefane4f2d102009-06-15 10:57:28 +08006986 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006987 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01006988 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04006989
6990 trace_seq_init(s);
6991
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006992 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006993 trace_seq_printf(s, "entries: %ld\n", cnt);
6994
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006995 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006996 trace_seq_printf(s, "overrun: %ld\n", cnt);
6997
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006998 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006999 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7000
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007001 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007002 trace_seq_printf(s, "bytes: %ld\n", cnt);
7003
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09007004 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007005 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007006 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007007 usec_rem = do_div(t, USEC_PER_SEC);
7008 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7009 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007010
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007011 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007012 usec_rem = do_div(t, USEC_PER_SEC);
7013 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7014 } else {
7015 /* counter or tsc mode for trace_clock */
7016 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007017 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007018
7019 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007020 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08007021 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07007022
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007023 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07007024 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7025
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007026 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05007027 trace_seq_printf(s, "read events: %ld\n", cnt);
7028
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05007029 count = simple_read_from_buffer(ubuf, count, ppos,
7030 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04007031
7032 kfree(s);
7033
7034 return count;
7035}
7036
7037static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007038 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04007039 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007040 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007041 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04007042};
7043
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007044#ifdef CONFIG_DYNAMIC_FTRACE
7045
7046static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007047tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007048 size_t cnt, loff_t *ppos)
7049{
7050 unsigned long *p = filp->private_data;
Steven Rostedt (VMware)6a9c9812017-06-27 11:02:49 -04007051 char buf[64]; /* Not too big for a shallow stack */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007052 int r;
7053
Steven Rostedt (VMware)6a9c9812017-06-27 11:02:49 -04007054 r = scnprintf(buf, 63, "%ld", *p);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007055 buf[r++] = '\n';
7056
Steven Rostedt (VMware)6a9c9812017-06-27 11:02:49 -04007057 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007058}
7059
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007060static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007061 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04007062 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007063 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007064};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007065#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007066
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007067#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
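/*
 * Function probes backing the "snapshot" command: the plain probe takes a
 * snapshot on every hit, while the counted variant decrements its per-ip
 * counter and stops snapshotting once the count is exhausted.
 */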
7068static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04007069ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007070 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007071 void *data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007072{
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04007073 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007074}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007075
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007076static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04007077ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007078 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007079 void *data)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007080{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007081 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007082 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007083
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007084 if (mapper)
7085 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007086
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007087 if (count) {
7088
7089 if (*count <= 0)
7090 return;
7091
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007092 (*count)--;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007093 }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007094
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04007095 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007096}
7097
7098static int
7099ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7100 struct ftrace_probe_ops *ops, void *data)
7101{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007102 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007103 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007104
7105 seq_printf(m, "%ps:", (void *)ip);
7106
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01007107 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007108
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007109 if (mapper)
7110 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7111
7112 if (count)
7113 seq_printf(m, ":count=%ld\n", *count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007114 else
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007115 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007116
7117 return 0;
7118}
7119
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007120static int
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007121ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007122 unsigned long ip, void *init_data, void **data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007123{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007124 struct ftrace_func_mapper *mapper = *data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007125
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007126 if (!mapper) {
7127 mapper = allocate_ftrace_func_mapper();
7128 if (!mapper)
7129 return -ENOMEM;
7130 *data = mapper;
7131 }
7132
7133 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007134}
7135
7136static void
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007137ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007138 unsigned long ip, void *data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007139{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007140 struct ftrace_func_mapper *mapper = data;
7141
7142 if (!ip) {
7143 if (!mapper)
7144 return;
7145 free_ftrace_func_mapper(mapper, NULL);
7146 return;
7147 }
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007148
7149 ftrace_func_mapper_remove_ip(mapper, ip);
7150}
7151
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007152static struct ftrace_probe_ops snapshot_probe_ops = {
7153 .func = ftrace_snapshot,
7154 .print = ftrace_snapshot_print,
7155};
7156
7157static struct ftrace_probe_ops snapshot_count_probe_ops = {
7158 .func = ftrace_count_snapshot,
7159 .print = ftrace_snapshot_print,
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007160 .init = ftrace_snapshot_init,
7161 .free = ftrace_snapshot_free,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007162};
7163
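/*
 * Handler for the "snapshot" command in set_ftrace_filter, e.g.
 * "func:snapshot" or "func:snapshot:count". A leading '!' unregisters the
 * probe; the optional count limits how many snapshots are taken.
 */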
7164static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04007165ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007166 char *glob, char *cmd, char *param, int enable)
7167{
7168 struct ftrace_probe_ops *ops;
7169 void *count = (void *)-1;
7170 char *number;
7171 int ret;
7172
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -04007173 if (!tr)
7174 return -ENODEV;
7175
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007176 /* hash funcs only work with set_ftrace_filter */
7177 if (!enable)
7178 return -EINVAL;
7179
7180 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7181
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04007182 if (glob[0] == '!')
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04007183 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007184
7185 if (!param)
7186 goto out_reg;
7187
7188 number = strsep(&param, ":");
7189
7190 if (!strlen(number))
7191 goto out_reg;
7192
7193 /*
7194 * We use the callback data field (which is a pointer)
7195 * as our counter.
7196 */
7197 ret = kstrtoul(number, 0, (unsigned long *)&count);
7198 if (ret)
7199 return ret;
7200
7201 out_reg:
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04007202 ret = tracing_alloc_snapshot_instance(tr);
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04007203 if (ret < 0)
7204 goto out;
7205
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04007206 ret = register_ftrace_function_probe(glob, tr, ops, count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007207
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04007208 out:
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007209 return ret < 0 ? ret : 0;
7210}
7211
7212static struct ftrace_func_command ftrace_snapshot_cmd = {
7213 .name = "snapshot",
7214 .func = ftrace_trace_snapshot_callback,
7215};
7216
Tom Zanussi38de93a2013-10-24 08:34:18 -05007217static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007218{
7219 return register_ftrace_command(&ftrace_snapshot_cmd);
7220}
7221#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05007222static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007223#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007224
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007225static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007226{
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007227 if (WARN_ON(!tr->dir))
7228 return ERR_PTR(-ENODEV);
7229
7230 /* Top directory uses NULL as the parent */
7231 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7232 return NULL;
7233
7234 /* All sub buffers have a descriptor */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007235 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007236}
7237
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007238static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7239{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007240 struct dentry *d_tracer;
7241
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007242 if (tr->percpu_dir)
7243 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007244
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007245 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05007246 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007247 return NULL;
7248
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007249 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007250
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007251 WARN_ONCE(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007252 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007253
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007254 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007255}
7256
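/*
 * Like trace_create_file(), but also stashes cpu + 1 in the inode's
 * i_cdev field so that tracing_get_cpu() can recover the CPU number.
 */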
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007257static struct dentry *
7258trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7259 void *data, long cpu, const struct file_operations *fops)
7260{
7261 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7262
7263 if (ret) /* See tracing_get_cpu() */
David Howells7682c912015-03-17 22:26:16 +00007264 d_inode(ret)->i_cdev = (void *)(cpu + 1);
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007265 return ret;
7266}
7267
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007268static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007269tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007270{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007271 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007272 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04007273 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007274
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09007275 if (!d_percpu)
7276 return;
7277
Steven Rostedtdd49a382010-10-20 21:51:26 -04007278 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007279 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01007280 if (!d_cpu) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07007281 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01007282 return;
7283 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007284
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01007285 /* per cpu trace_pipe */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007286 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02007287 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007288
7289 /* per cpu trace */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007290 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007291 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04007292
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007293 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007294 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007295
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007296 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007297 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08007298
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007299 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007300 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007301
7302#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007303 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007304 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007305
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007306 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007307 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007308#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007309}
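
/*
 * A sketch of the layout this creates, assuming the usual tracefs mount
 * point (/sys/kernel/tracing):
 *
 *	per_cpu/cpu0/trace		per-CPU view of the trace buffer
 *	per_cpu/cpu0/trace_pipe		per-CPU consuming reader
 *	per_cpu/cpu0/trace_pipe_raw	raw ring buffer pages, for splice()
 *	per_cpu/cpu0/stats		per-CPU entry/overrun statistics
 *	per_cpu/cpu0/buffer_size_kb	size of that CPU's buffer
 *
 * plus snapshot/snapshot_raw when CONFIG_TRACER_SNAPSHOT is enabled.
 */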

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}


static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};
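
/*
 * For illustration: each tracer-specific flag appears as a boolean file
 * under options/ and is flipped from user space, e.g.
 *
 *	echo 1 > /sys/kernel/tracing/options/func_stack_trace
 *
 * (func_stack_trace comes from the function tracer; the exact set of
 * files depends on which tracers are registered.)  The write path above
 * rejects anything but "0" and "1".
 */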
7367
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007368/*
7369 * In order to pass in both the trace_array descriptor as well as the index
7370 * to the flag that the trace option file represents, the trace_array
7371 * has a character array of trace_flags_index[], which holds the index
7372 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7373 * The address of this character array is passed to the flag option file
7374 * read/write callbacks.
7375 *
7376 * In order to extract both the index and the trace_array descriptor,
7377 * get_tr_index() uses the following algorithm.
7378 *
7379 * idx = *ptr;
7380 *
7381 * As the pointer itself contains the address of the index (remember
7382 * index[1] == 1).
7383 *
7384 * Then to get the trace_array descriptor, by subtracting that index
7385 * from the ptr, we get to the start of the index itself.
7386 *
7387 * ptr - idx == &index[0]
7388 *
7389 * Then a simple container_of() from that pointer gets us to the
7390 * trace_array descriptor.
7391 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;

	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}
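
/*
 * A worked example of the trick above (addresses are made up): if
 * tr->trace_flags_index[] starts at 0x1000, the option file for flag 5
 * stores data == 0x1005.  Then *data == 5, data - 5 == 0x1000 ==
 * &index[0], and container_of() walks back from that member to the
 * enclosing trace_array.
 */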

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	char *buf;

	get_tr_index(tr_index, &tr, &index);

	if (tr->trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warn("Could not create tracefs '%s' entry\n", name);

	return ret;
}


static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warn("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);

}

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/* Make sure there are no duplicate flags. */
		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
			return;
	}

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		WARN_ONCE(topts[cnt].entry == NULL,
			  "Failed to create trace option: %s",
			  opts[cnt].name);
	}
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options,
				 (void *)&tr->trace_flags_index[index],
				 &trace_options_core_fops);
}

static void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	bool top_level = tr == &global_trace;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		if (top_level ||
		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
			create_trace_option_core_file(tr, trace_options[i], i);
	}
}

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (!!val == tracer_tracing_is_on(tr)) {
			val = 0; /* do nothing */
		} else if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
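
/*
 * These fops back the "tracing_on" file.  For illustration:
 *
 *	echo 0 > /sys/kernel/tracing/tracing_on		# stop recording
 *	echo 1 > /sys/kernel/tracing/tracing_on		# resume recording
 *
 * Writing the value that is already set is deliberately a no-op, so a
 * redundant write does not bounce the current tracer's start/stop
 * callbacks.
 */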

struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		tr->trace_buffer.buffer = NULL;
		free_percpu(tr->trace_buffer.data);
		tr->trace_buffer.data = NULL;
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}

static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}

static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);
	INIT_LIST_HEAD(&tr->hist_vars);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	ftrace_init_trace_array(tr);

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;

}

static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;
	int i;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	/* Disable all the flags that were enabled coming in */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
		if ((1 << i) & ZEROED_TRACE_FLAGS)
			set_tracer_flag(tr, 1 << i, 0);
	}

	tracing_set_nop(tr);
	clear_ftrace_function_probes(tr);
	event_trace_del_tracer(tr);
	ftrace_clear_pids(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;
}

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}
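
/*
 * For illustration, instances are managed entirely from user space:
 *
 *	mkdir /sys/kernel/tracing/instances/foo		# instance_mkdir("foo")
 *	rmdir /sys/kernel/tracing/instances/foo		# instance_rmdir("foo")
 *
 * Each instance gets its own ring buffer and its own copy of most of the
 * control files that init_tracer_tracefs() creates below; the rmdir fails
 * with -EBUSY while the instance is still in use.
 */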

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	struct trace_event_file *file;
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	file = __find_event_file(tr, "ftrace", "print");
	if (file && file->dir)
		trace_create_file("trigger", 0644, file->dir, file,
				  &event_trigger_fops);
	tr->trace_marker_file = file;

	trace_create_file("trace_marker_raw", 0220, d_tracer,
			  tr, &tracing_mark_raw_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	trace_create_file("timestamp_mode", 0444, d_tracer, tr,
			  &trace_time_stamp_mode_fops);

	create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}

static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}
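
/*
 * For illustration: with the automount in place, both of these paths lead
 * to the same tracefs files on a typical system:
 *
 *	ls /sys/kernel/tracing			# native tracefs mount
 *	ls /sys/kernel/debug/tracing		# automounted via debugfs
 */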

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!tracefs_initialized()) ||
		(IS_ENABLED(CONFIG_DEBUG_FS) &&
		 WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return NULL;
}

extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

static void __init trace_eval_init(void)
{
	int len;

	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}
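
/*
 * Background, for illustration: these eval maps are what allow
 * TRACE_DEFINE_ENUM()/TRACE_DEFINE_SIZEOF() users to have symbolic names
 * in an event's print fmt rewritten to their numeric values, so user-space
 * parsers reading the event format files see resolved constants.
 */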

#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	mutex_lock(&trace_eval_mutex);

	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	event_trace_init();

	init_tracer_tracefs(&global_trace, d_tracer);
	ftrace_init_tracefs_toplevel(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", 0444, d_tracer,
			NULL, &tracing_saved_tgids_fops);

	trace_eval_init();

	trace_create_eval_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call  = trace_panic_handler,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
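
/*
 * For illustration: both notifiers are gated on ftrace_dump_on_oops, which
 * is typically enabled with the ftrace_dump_on_oops kernel command-line
 * option or the kernel.ftrace_dump_on_oops sysctl.
 */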

/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be NUL terminated, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);
	printk_nmi_direct_enter();

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We read everything we can and then
	 * release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	printk_nmi_direct_exit();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
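
/*
 * For illustration: besides the panic/die notifiers registered at boot, a
 * dump can be requested by hand through the magic SysRq 'z' key, e.g.
 *
 *	echo z > /proc/sysrq-trigger
 *
 * which ends up here with DUMP_ALL.
 */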

int trace_run_command(const char *buf, int (*createfn)(int, char **))
{
	char **argv;
	int argc, ret;

	argc = 0;
	ret = 0;
	argv = argv_split(GFP_KERNEL, buf, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = createfn(argc, argv);

	argv_free(argv);

	return ret;
}
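
/*
 * A sketch of what a caller sees (the kprobe_events file is one such
 * user): for the input line
 *
 *	p:myprobe do_sys_open dfd=%ax
 *
 * argv_split() yields argc == 3 and argv == { "p:myprobe", "do_sys_open",
 * "dfd=%ax" }, which is exactly what createfn() receives.
 */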

#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(int, char **))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = trace_run_command(buf, createfn);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
8470
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008471__init static int tracer_alloc_buffers(void)
8472{
Steven Rostedt73c51622009-03-11 13:42:01 -04008473 int ring_buf_size;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10308474 int ret = -ENOMEM;
8475
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04008476 /*
8477 * Make sure we don't accidently add more trace options
8478 * than we have bits for.
8479 */
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008480 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04008481
Rusty Russell9e01c1b2009-01-01 10:12:22 +10308482 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
8483 goto out;
8484
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008485 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
Rusty Russell9e01c1b2009-01-01 10:12:22 +10308486 goto out_free_buffer_mask;
8487
Steven Rostedt07d777f2011-09-22 14:01:55 -04008488 /* Only allocate trace_printk buffers if a trace_printk exists */
8489 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
Steven Rostedt81698832012-10-11 10:15:05 -04008490 /* Must be called before global_trace.buffer is allocated */
Steven Rostedt07d777f2011-09-22 14:01:55 -04008491 trace_printk_init_buffers();
8492
Steven Rostedt73c51622009-03-11 13:42:01 -04008493 /* To save memory, keep the ring buffer size to its minimum */
8494 if (ring_buffer_expanded)
8495 ring_buf_size = trace_buf_size;
8496 else
8497 ring_buf_size = 1;
8498
Rusty Russell9e01c1b2009-01-01 10:12:22 +10308499 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008500 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008501
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008502 raw_spin_lock_init(&global_trace.start_lock);
8503
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01008504 /*
8505 * The prepare callbacks allocates some memory for the ring buffer. We
8506 * don't free the buffer if the if the CPU goes down. If we were to free
8507 * the buffer, then the user would lose any trace that was in the
8508 * buffer. The memory will be removed once the "instance" is removed.
8509 */
8510 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
8511 "trace/RB:preapre", trace_rb_cpu_prepare,
8512 NULL);
8513 if (ret < 0)
8514 goto out_free_cpumask;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04008515 /* Used for event triggers */
Dan Carpenter147d88e02017-08-01 14:02:01 +03008516 ret = -ENOMEM;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04008517 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
8518 if (!temp_buffer)
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01008519 goto out_rm_hp_state;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04008520
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09008521 if (trace_create_savedcmd() < 0)
8522 goto out_free_temp_buffer;
8523
Steven Rostedtab464282008-05-12 21:21:00 +02008524	/* TODO: make the number of buffers hot pluggable with CPUs */
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008525 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04008526 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
8527 WARN_ON(1);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09008528 goto out_free_savedcmd;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04008529 }
Steven Rostedta7603ff2012-08-06 16:24:11 -04008530
Steven Rostedt499e5472012-02-22 15:50:28 -05008531 if (global_trace.buffer_disabled)
8532 tracing_off();
Steven Rostedt3928a8a2008-09-29 23:02:41 -04008533
Steven Rostedte1e232c2014-02-10 23:38:46 -05008534 if (trace_boot_clock) {
8535 ret = tracing_set_clock(&global_trace, trace_boot_clock);
8536 if (ret < 0)
Joe Perchesa395d6a2016-03-22 14:28:09 -07008537 pr_warn("Trace clock %s not defined, going back to default\n",
8538 trace_boot_clock);
Steven Rostedte1e232c2014-02-10 23:38:46 -05008539 }
8540
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04008541 /*
8542 * register_tracer() might reference current_trace, so it
8543 * needs to be set before we register anything. This is
8544 * just a bootstrap of current_trace anyway.
8545 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008546 global_trace.current_trace = &nop_trace;
8547
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05008548 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8549
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05008550 ftrace_init_global_array_ops(&global_trace);
8551
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008552 init_trace_flags_index(&global_trace);
8553
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04008554 register_tracer(&nop_trace);
8555
Steven Rostedt (VMware)dbeafd02017-03-03 13:48:42 -05008556 /* Function tracing may start here (via kernel command line) */
8557 init_function_trace();
8558
Steven Rostedt60a11772008-05-12 21:20:44 +02008559 /* All seems OK, enable tracing */
8560 tracing_disabled = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04008561
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008562 atomic_notifier_chain_register(&panic_notifier_list,
8563 &trace_panic_notifier);
8564
8565 register_die_notifier(&trace_die_notifier);
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01008566
Steven Rostedtae63b31e2012-05-03 23:09:03 -04008567 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
8568
8569 INIT_LIST_HEAD(&global_trace.systems);
8570 INIT_LIST_HEAD(&global_trace.events);
Tom Zanussi067fe032018-01-15 20:51:56 -06008571 INIT_LIST_HEAD(&global_trace.hist_vars);
Steven Rostedtae63b31e2012-05-03 23:09:03 -04008572 list_add(&global_trace.list, &ftrace_trace_arrays);
8573
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08008574 apply_trace_boot_options();
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04008575
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008576 register_snapshot_cmd();
8577
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01008578 return 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008579
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09008580out_free_savedcmd:
8581 free_saved_cmdlines_buffer(savedcmd);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04008582out_free_temp_buffer:
8583 ring_buffer_free(temp_buffer);
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01008584out_rm_hp_state:
8585 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10308586out_free_cpumask:
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008587 free_cpumask_var(global_trace.tracing_cpumask);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10308588out_free_buffer_mask:
8589 free_cpumask_var(tracing_buffer_mask);
8590out:
8591 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008592}
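/*
 * Illustrative sketch (not part of trace.c): tracer_alloc_buffers()
 * above uses the classic goto-unwind idiom -- one label per resource,
 * with each failure path jumping to the label that releases everything
 * acquired so far, in reverse order. A self-contained miniature of the
 * same shape (setup_example() and both allocations are hypothetical):
 */
#include <stdlib.h>

static void *a, *b;

static int setup_example(void)
{
	a = malloc(32);
	if (!a)
		goto out;
	b = malloc(32);
	if (!b)
		goto out_free_a;

	/* success: a and b stay allocated for the life of the program */
	return 0;

out_free_a:
	free(a);
out:
	return -1;		/* would be -ENOMEM in kernel code */
}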
Steven Rostedtb2821ae2009-02-02 21:38:32 -05008593
Steven Rostedt (VMware)e725c732017-03-03 13:37:33 -05008594void __init early_trace_init(void)
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05008595{
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05008596 if (tracepoint_printk) {
8597 tracepoint_print_iter =
8598 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
8599 if (WARN_ON(!tracepoint_print_iter))
8600 tracepoint_printk = 0;
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05008601 else
8602 static_key_enable(&tracepoint_printk_key.key);
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05008603 }
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05008604 tracer_alloc_buffers();
Steven Rostedt (VMware)e725c732017-03-03 13:37:33 -05008605}
8606
8607void __init trace_init(void)
8608{
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04008609 trace_event_init();
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05008610}
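/*
 * Illustrative sketch (not part of trace.c): the tracepoint_printk_key
 * handling in early_trace_init() above relies on the kernel's static
 * branch facility. The <linux/jump_label.h> calls below are real kernel
 * APIs, but example_key and example_boot_setup() are hypothetical, and
 * this only builds inside a kernel tree:
 */
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(example_key);

static void hot_path(void)
{
	/* A single nop in the instruction stream until the key flips */
	if (static_branch_unlikely(&example_key))
		pr_info("rarely enabled slow path\n");
}

static void __init example_boot_setup(bool boot_param_set)
{
	/* Mirrors static_key_enable(&tracepoint_printk_key.key) above */
	if (boot_param_set)
		static_key_enable(&example_key.key);
}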
8611
Steven Rostedtb2821ae2009-02-02 21:38:32 -05008612__init static int clear_boot_tracer(void)
8613{
8614 /*
8615	 * The default bootup tracer name points into an init section.
8616	 * This function runs at late_initcall time. If the boot tracer
8617	 * was not found and registered by now, clear the pointer to
8618	 * prevent a later registration from accessing the buffer that
8619	 * is about to be freed.
8620 */
8621 if (!default_bootup_tracer)
8622 return 0;
8623
8624 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
8625 default_bootup_tracer);
8626 default_bootup_tracer = NULL;
8627
8628 return 0;
8629}
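/*
 * Usage note: default_bootup_tracer is set from the "ftrace=" kernel
 * command-line parameter, e.g. booting with
 *
 *	ftrace=function_graph
 *
 * starts that tracer as early as possible. If the named tracer never
 * registers itself, clear_boot_tracer() above drops the stale pointer
 * at late_initcall time, before the init sections holding it are freed.
 */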
8630
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008631fs_initcall(tracer_init_tracefs);
Steven Rostedt (VMware)4bb0f0e2017-08-01 12:01:52 -04008632late_initcall_sync(clear_boot_tracer);
Chris Wilson3fd49c92018-03-30 16:01:31 +01008633
8634#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
8635__init static int tracing_set_default_clock(void)
8636{
8637 /* sched_clock_stable() is determined in late_initcall */
Chris Wilson5125eee2018-04-04 22:24:50 +01008638 if (!trace_boot_clock && !sched_clock_stable()) {
Chris Wilson3fd49c92018-03-30 16:01:31 +01008639 printk(KERN_WARNING
8640 "Unstable clock detected, switching default tracing clock to \"global\"\n"
8641 "If you want to keep using the local clock, then add:\n"
8642 " \"trace_clock=local\"\n"
8643 "on the kernel command line\n");
8644 tracing_set_clock(&global_trace, "global");
8645 }
8646
8647 return 0;
8648}
8649late_initcall_sync(tracing_set_default_clock);
8650#endif
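/*
 * Usage note: the trace clock can also be inspected and changed at
 * runtime through tracefs (path assumes the standard mount point; the
 * bracketed entry is active, and the exact list varies by architecture):
 *
 *	# cat /sys/kernel/tracing/trace_clock
 *	[local] global counter uptime perf mono mono_raw boot
 *	# echo global > /sys/kernel/tracing/trace_clock
 *
 * Alternatively, "trace_clock=local" on the kernel command line pins
 * the clock at boot and suppresses the automatic switch above.
 */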