// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 and is set back to zero only if the
 * initialization of the tracer is successful; that is the only
 * place that clears it.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

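/*
 * Illustrative layout of one saved array (a sketch restating the
 * comment above, for an array that saves N maps):
 *
 *	item[0].head   = { .mod = owning module (or NULL), .length = N }
 *	item[1..N].map = the saved struct trace_eval_map entries
 *	item[N+1].tail = { .next = next saved array (or NULL), .end = NULL }
 */
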
static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

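/*
 * Worked example for the rounding above: 1499 ns -> (1499 + 500) / 1000
 * = 1 us, while 1500 ns -> (1500 + 500) / 1000 = 2 us, i.e. the +500
 * rounds to the nearest microsecond rather than truncating.
 */
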
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS	(TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}

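/*
 * Illustrative wiring of the three helpers above into seq_file
 * operations (a sketch; "p" stands for the pid_list a real caller would
 * fetch from its own data, and the example_* ops are hypothetical, not
 * defined in this file):
 *
 *	static const struct seq_operations example_pid_sops = {
 *		.start = example_pid_start,	// calls trace_pid_start(p, pos)
 *		.next  = example_pid_next,	// calls trace_pid_next(p, v, pos)
 *		.stop  = example_pid_stop,
 *		.show  = trace_pid_show,	// usable directly
 *	};
 */
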
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate the array: the write is an all-or-nothing
	 * operation, so a new array is created whenever the user adds
	 * pids. If the operation fails, the current list is left
	 * unmodified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list)
		return -ENOMEM;

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}

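/*
 * Illustrative caller (a sketch, not code in this file): a tracefs pid
 * filter write handler passes the user buffer through trace_pid_write()
 * and then publishes the new list, roughly:
 *
 *	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
 *	if (ret < 0)
 *		return ret;
 *	rcu_assign_pointer(tr->filtered_pids, pid_list);
 *	if (filtered_pids) {
 *		synchronize_rcu();
 *		trace_free_pid_list(filtered_pids);
 *	}
 */
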
static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. In any case, this
 * is configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) The page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only from read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

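/*
 * Illustrative reader-side use of the primitives above (a sketch; the
 * real consumers live further down in this file): a single-cpu consumer
 * takes the per-cpu side of the lock, while passing RING_BUFFER_ALL_CPUS
 * gains exclusive access to all cpu buffers.
 *
 *	trace_access_lock(cpu);
 *	event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *	trace_access_unlock(cpu);
 */
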
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, flags, pc);
	ent->type = type;
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct ring_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

static __always_inline void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

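/*
 * Illustrative caller (a sketch): kernel code normally reaches
 * __trace_puts() through the trace_puts() macro rather than calling it
 * directly, e.g.:
 *
 *	trace_puts("reached the slow path\n");
 */
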
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

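/*
 * Illustrative use from kernel code (a sketch; the error condition is
 * hypothetical): allocate the snapshot buffer up front where sleeping
 * is allowed, then capture at the interesting point:
 *
 *	tracing_snapshot_alloc();		// at init time, may sleep
 *	...
 *	if (unlikely(hit_error_condition))
 *		tracing_snapshot();		// swap the live buffer aside
 */
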
/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr:		The tracing instance to snapshot
 * @cond_data:	The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr:		The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);

/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr:		The tracing instance
 * @cond_data:	User data to associate with the snapshot
 * @update:	Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
				 cond_update_fn_t update)
{
	struct cond_snapshot *cond_snapshot;
	int ret = 0;

	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
	if (!cond_snapshot)
		return -ENOMEM;

	cond_snapshot->cond_data = cond_data;
	cond_snapshot->update = update;

	mutex_lock(&trace_types_lock);

	ret = tracing_alloc_snapshot_instance(tr);
	if (ret)
		goto fail_unlock;

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	/*
	 * The cond_snapshot can only change to NULL without the
	 * trace_types_lock. We don't care if we race with it going
	 * to NULL, but we want to make sure that it's not set to
	 * something other than NULL when we get here, which we can
	 * do safely with only holding the trace_types_lock and not
	 * having to take the max_lock.
	 */
	if (tr->cond_snapshot) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	arch_spin_lock(&tr->max_lock);
	tr->cond_snapshot = cond_snapshot;
	arch_spin_unlock(&tr->max_lock);

	mutex_unlock(&trace_types_lock);

	return ret;

 fail_unlock:
	mutex_unlock(&trace_types_lock);
	kfree(cond_snapshot);
	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);

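/*
 * Illustrative update() callback (a sketch; "struct my_data" and the
 * policy are hypothetical). The callback is invoked with tr->max_lock
 * held and decides, per hit, whether the snapshot should be taken:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_data *d = cond_data;
 *
 *		return d->hits++ == 0;	// snapshot only the first hit
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &data, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &data);
 */
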
/**
 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 * @tr:		The tracing instance
 *
 * Check whether the conditional snapshot for the given instance is
 * enabled; if so, free the cond_snapshot associated with it,
 * otherwise return -EINVAL.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	int ret = 0;

	arch_spin_lock(&tr->max_lock);

	if (!tr->cond_snapshot)
		ret = -EINVAL;
	else {
		kfree(tr->cond_snapshot);
		tr->cond_snapshot = NULL;
	}

	arch_spin_unlock(&tr->max_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	return NULL;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
{
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	return false;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#endif /* CONFIG_TRACER_SNAPSHOT */

void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	ARCH_TRACE_CLOCKS
};

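/*
 * The names above are what a user selects through the tracefs
 * "trace_clock" file (or via the "trace_clock=" boot option handled
 * earlier), e.g.:
 *
 *	echo mono > /sys/kernel/debug/tracing/trace_clock
 */
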
bool trace_clock_in_ns(struct trace_array *tr)
{
	if (trace_clocks[tr->clock_id].in_ns)
		return true;

	return false;
}

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

1400 /*
1401	 * If the parser did not finish on the last write (cont is set),
1402	 * continue reading the user input without skipping leading spaces.
1403 */
1404 if (!parser->cont) {
1405 /* skip white space */
1406 while (cnt && isspace(ch)) {
1407 ret = get_user(ch, ubuf++);
1408 if (ret)
1409 goto out;
1410 read++;
1411 cnt--;
1412 }
1413
Changbin Du76638d92018-01-16 17:02:29 +08001414 parser->idx = 0;
1415
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001416 /* only spaces were written */
Changbin Du921a7ac2018-01-16 17:02:28 +08001417 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001418 *ppos += read;
1419 ret = read;
1420 goto out;
1421 }
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001422 }
1423
1424 /* read the non-space input */
Changbin Du921a7ac2018-01-16 17:02:28 +08001425 while (cnt && !isspace(ch) && ch) {
Li Zefan3c235a32009-09-22 13:51:54 +08001426 if (parser->idx < parser->size - 1)
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001427 parser->buffer[parser->idx++] = ch;
1428 else {
1429 ret = -EINVAL;
1430 goto out;
1431 }
1432 ret = get_user(ch, ubuf++);
1433 if (ret)
1434 goto out;
1435 read++;
1436 cnt--;
1437 }
1438
1439 /* We either got finished input or we have to wait for another call. */
Changbin Du921a7ac2018-01-16 17:02:28 +08001440 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001441 parser->buffer[parser->idx] = 0;
1442 parser->cont = false;
Steven Rostedt057db842013-10-09 22:23:23 -04001443 } else if (parser->idx < parser->size - 1) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001444 parser->cont = true;
1445 parser->buffer[parser->idx++] = ch;
Changbin Duf4d07062018-01-16 17:02:30 +08001446 /* Make sure the parsed string always terminates with '\0'. */
1447 parser->buffer[parser->idx] = 0;
Steven Rostedt057db842013-10-09 22:23:23 -04001448 } else {
1449 ret = -EINVAL;
1450 goto out;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001451 }
1452
1453 *ppos += read;
1454 ret = read;
1455
1456out:
1457 return ret;
1458}
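
/*
 * Sketch of a typical trace_parser user, modeled on the filter write
 * handlers. The function name and buffer size below are illustrative,
 * not part of this file.
 */
#if 0
static ssize_t example_filter_write(struct file *filp, const char __user *ubuf,
				    size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t ret;

	if (trace_parser_get_init(&parser, 128))
		return -ENOMEM;

	ret = trace_get_user(&parser, ubuf, cnt, ppos);
	if (ret >= 0 && trace_parser_loaded(&parser) &&
	    !trace_parser_cont(&parser)) {
		/* parser.buffer now holds one NUL-terminated token */
	}

	trace_parser_put(&parser);
	return ret;
}
#endif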
1459
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001460/* TODO add a seq_buf_to_buffer() */
Dmitri Vorobievb8b94262009-03-22 19:11:11 +02001461static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001462{
1463 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001464
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001465 if (trace_seq_used(s) <= s->seq.readpos)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001466 return -EBUSY;
1467
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001468 len = trace_seq_used(s) - s->seq.readpos;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001469 if (cnt > len)
1470 cnt = len;
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001471 memcpy(buf, s->buffer + s->seq.readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001472
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001473 s->seq.readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001474 return cnt;
1475}
1476
Tim Bird0e950172010-02-25 15:36:43 -08001477unsigned long __read_mostly tracing_thresh;
1478
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001479#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001480/*
1481 * Copy the new maximum trace into the separate maximum-trace
1482 * structure. (this way the maximum trace is permanently saved,
Chunyu Hu5a93bae2017-10-19 14:32:33 +08001483 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001484 */
1485static void
1486__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1487{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001488 struct trace_buffer *trace_buf = &tr->trace_buffer;
1489 struct trace_buffer *max_buf = &tr->max_buffer;
1490 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1491 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001492
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001493 max_buf->cpu = cpu;
1494 max_buf->time_start = data->preempt_timestamp;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001495
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05001496 max_data->saved_latency = tr->max_latency;
Steven Rostedt8248ac02009-09-02 12:27:41 -04001497 max_data->critical_start = data->critical_start;
1498 max_data->critical_end = data->critical_end;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001499
Tom Zanussi85f726a2019-03-05 10:12:00 -06001500 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
Steven Rostedt8248ac02009-09-02 12:27:41 -04001501 max_data->pid = tsk->pid;
Steven Rostedt (Red Hat)f17a5192013-05-30 21:10:37 -04001502 /*
1503 * If tsk == current, then use current_uid(), as that does not use
1504 * RCU. The irq tracer can be called out of RCU scope.
1505 */
1506 if (tsk == current)
1507 max_data->uid = current_uid();
1508 else
1509 max_data->uid = task_uid(tsk);
1510
Steven Rostedt8248ac02009-09-02 12:27:41 -04001511 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1512 max_data->policy = tsk->policy;
1513 max_data->rt_priority = tsk->rt_priority;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001514
1515 /* record this tasks comm */
1516 tracing_record_cmdline(tsk);
1517}
1518
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001519/**
1520 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1521 * @tr: tracer
1522 * @tsk: the task with the latency
1523 * @cpu: The cpu that initiated the trace.
Tom Zanussia35873a2019-02-13 17:42:45 -06001524 * @cond_data: User data associated with a conditional snapshot
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001525 *
1526 * Flip the buffers between the @tr and the max_tr and record information
1527 * about which task was the cause of this latency.
1528 */
Ingo Molnare309b412008-05-12 21:20:51 +02001529void
Tom Zanussia35873a2019-02-13 17:42:45 -06001530update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1531 void *cond_data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001532{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001533 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001534 return;
1535
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001536 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt34600f02013-01-22 13:35:11 -05001537
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05001538 if (!tr->allocated_snapshot) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001539 /* Only the nop tracer should hit this when disabling */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001540 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001541 return;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001542 }
Steven Rostedt34600f02013-01-22 13:35:11 -05001543
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001544 arch_spin_lock(&tr->max_lock);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001545
Masami Hiramatsu73c8d892018-07-14 01:28:15 +09001546 /* Inherit the recordable setting from trace_buffer */
1547 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1548 ring_buffer_record_on(tr->max_buffer.buffer);
1549 else
1550 ring_buffer_record_off(tr->max_buffer.buffer);
1551
Tom Zanussia35873a2019-02-13 17:42:45 -06001552#ifdef CONFIG_TRACER_SNAPSHOT
1553 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1554 goto out_unlock;
1555#endif
Gustavo A. R. Silva08ae88f2018-02-09 11:53:16 -06001556 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001557
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001558 __update_max_tr(tr, tsk, cpu);
Tom Zanussia35873a2019-02-13 17:42:45 -06001559
1560 out_unlock:
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001561 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001562}
1563
1564/**
1565 * update_max_tr_single - only copy one trace over, and reset the rest
1566 * @tr: tracer
1567 * @tsk: task with the latency
1568 * @cpu: the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001569 *
1570 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001571 */
Ingo Molnare309b412008-05-12 21:20:51 +02001572void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001573update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1574{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001575 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001576
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001577 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001578 return;
1579
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001580 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001581 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001582 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001583 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001584 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001585 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001586
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001587 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001588
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001589 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001590
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001591 if (ret == -EBUSY) {
1592 /*
1593 * We failed to swap the buffer due to a commit taking
1594 * place on this CPU. We fail to record, but we reset
1595 * the max trace buffer (no one writes directly to it)
1596 * and flag that it failed.
1597 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001598 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001599 "Failed to swap buffers due to commit in progress\n");
1600 }
1601
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001602 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001603
1604 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001605 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001606}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001607#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001608
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05001609static int wait_on_pipe(struct trace_iterator *iter, int full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001610{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001611 /* Iterators are static, they should be filled or empty */
1612 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001613 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001614
Rabin Vincente30f53a2014-11-10 19:46:34 +01001615 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1616 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001617}
1618
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001619#ifdef CONFIG_FTRACE_STARTUP_TEST
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001620static bool selftests_can_run;
1621
1622struct trace_selftests {
1623 struct list_head list;
1624 struct tracer *type;
1625};
1626
1627static LIST_HEAD(postponed_selftests);
1628
1629static int save_selftest(struct tracer *type)
1630{
1631 struct trace_selftests *selftest;
1632
1633 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1634 if (!selftest)
1635 return -ENOMEM;
1636
1637 selftest->type = type;
1638 list_add(&selftest->list, &postponed_selftests);
1639 return 0;
1640}
1641
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001642static int run_tracer_selftest(struct tracer *type)
1643{
1644 struct trace_array *tr = &global_trace;
1645 struct tracer *saved_tracer = tr->current_trace;
1646 int ret;
1647
1648 if (!type->selftest || tracing_selftest_disabled)
1649 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001650
1651 /*
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001652 * If a tracer registers early in boot up (before scheduling is
1653 * initialized and such), then do not run its selftests yet.
1654	 * Instead, run them a little later in the boot process.
1655 */
1656 if (!selftests_can_run)
1657 return save_selftest(type);
1658
1659 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001660 * Run a selftest on this tracer.
1661 * Here we reset the trace buffer, and set the current
1662 * tracer to be this tracer. The tracer can then run some
1663 * internal tracing to verify that everything is in order.
1664 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001665 */
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001666 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001667
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001668 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001669
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001670#ifdef CONFIG_TRACER_MAX_TRACE
1671 if (type->use_max_tr) {
1672 /* If we expanded the buffers, make sure the max is expanded too */
1673 if (ring_buffer_expanded)
1674 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1675 RING_BUFFER_ALL_CPUS);
1676 tr->allocated_snapshot = true;
1677 }
1678#endif
1679
1680 /* the test is responsible for initializing and enabling */
1681 pr_info("Testing tracer %s: ", type->name);
1682 ret = type->selftest(type, tr);
1683 /* the test is responsible for resetting too */
1684 tr->current_trace = saved_tracer;
1685 if (ret) {
1686 printk(KERN_CONT "FAILED!\n");
1687 /* Add the warning after printing 'FAILED' */
1688 WARN_ON(1);
1689 return -1;
1690 }
1691 /* Only reset on passing, to avoid touching corrupted buffers */
1692 tracing_reset_online_cpus(&tr->trace_buffer);
1693
1694#ifdef CONFIG_TRACER_MAX_TRACE
1695 if (type->use_max_tr) {
1696 tr->allocated_snapshot = false;
1697
1698 /* Shrink the max buffer again */
1699 if (ring_buffer_expanded)
1700 ring_buffer_resize(tr->max_buffer.buffer, 1,
1701 RING_BUFFER_ALL_CPUS);
1702 }
1703#endif
1704
1705 printk(KERN_CONT "PASSED\n");
1706 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001707}
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001708
1709static __init int init_trace_selftests(void)
1710{
1711 struct trace_selftests *p, *n;
1712 struct tracer *t, **last;
1713 int ret;
1714
1715 selftests_can_run = true;
1716
1717 mutex_lock(&trace_types_lock);
1718
1719 if (list_empty(&postponed_selftests))
1720 goto out;
1721
1722 pr_info("Running postponed tracer tests:\n");
1723
1724 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1725 ret = run_tracer_selftest(p->type);
1726 /* If the test fails, then warn and remove from available_tracers */
1727 if (ret < 0) {
1728 WARN(1, "tracer: %s failed selftest, disabling\n",
1729 p->type->name);
1730 last = &trace_types;
1731 for (t = trace_types; t; t = t->next) {
1732 if (t == p->type) {
1733 *last = t->next;
1734 break;
1735 }
1736 last = &t->next;
1737 }
1738 }
1739 list_del(&p->list);
1740 kfree(p);
1741 }
1742
1743 out:
1744 mutex_unlock(&trace_types_lock);
1745
1746 return 0;
1747}
Steven Rostedtb9ef0322017-05-17 11:14:35 -04001748core_initcall(init_trace_selftests);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001749#else
1750static inline int run_tracer_selftest(struct tracer *type)
1751{
1752 return 0;
1753}
1754#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001755
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001756static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1757
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001758static void __init apply_trace_boot_options(void);
1759
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001760/**
1761 * register_tracer - register a tracer with the ftrace system.
1762 * @type: the plugin for the tracer
1763 *
1764 * Register a new plugin tracer.
1765 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001766int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001767{
1768 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001769 int ret = 0;
1770
1771 if (!type->name) {
1772 pr_info("Tracer must have a name\n");
1773 return -1;
1774 }
1775
Dan Carpenter24a461d2010-07-10 12:06:44 +02001776 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08001777 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1778 return -1;
1779 }
1780
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001781 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01001782
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001783 tracing_selftest_running = true;
1784
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001785 for (t = trace_types; t; t = t->next) {
1786 if (strcmp(type->name, t->name) == 0) {
1787 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08001788 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001789 type->name);
1790 ret = -1;
1791 goto out;
1792 }
1793 }
1794
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001795 if (!type->set_flag)
1796 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08001797 if (!type->flags) {
1798		/* allocate a dummy tracer_flags */
1799 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08001800 if (!type->flags) {
1801 ret = -ENOMEM;
1802 goto out;
1803 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08001804 type->flags->val = 0;
1805 type->flags->opts = dummy_tracer_opt;
1806 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001807 if (!type->flags->opts)
1808 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01001809
Chunyu Hud39cdd22016-03-08 21:37:01 +08001810 /* store the tracer for __set_tracer_option */
1811 type->flags->trace = type;
1812
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001813 ret = run_tracer_selftest(type);
1814 if (ret < 0)
1815 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02001816
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001817 type->next = trace_types;
1818 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001819 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02001820
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001821 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001822 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001823 mutex_unlock(&trace_types_lock);
1824
Steven Rostedtdac74942009-02-05 01:13:38 -05001825 if (ret || !default_bootup_tracer)
1826 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05001827
Li Zefanee6c2c12009-09-18 14:06:47 +08001828 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05001829 goto out_unlock;
1830
1831 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1832 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05001833 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05001834 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001835
1836 apply_trace_boot_options();
1837
Steven Rostedtdac74942009-02-05 01:13:38 -05001838 /* disable other selftests, since this will break it. */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05001839 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05001840#ifdef CONFIG_FTRACE_STARTUP_TEST
1841 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1842 type->name);
1843#endif
1844
1845 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001846 return ret;
1847}
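
/*
 * Sketch of a minimal register_tracer() caller. The tracer name and
 * callbacks are hypothetical; real tracers also supply selftest,
 * flags and other hooks as needed.
 */
#if 0
static int example_trace_init(struct trace_array *tr) { return 0; }
static void example_trace_reset(struct trace_array *tr) { }

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_trace_init,
	.reset	= example_trace_reset,
};

static __init int init_example_tracer(void)
{
	return register_tracer(&example_tracer);
}
core_initcall(init_example_tracer);
#endif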
1848
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001849void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001850{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001851 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001852
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001853 if (!buffer)
1854 return;
1855
Steven Rostedtf6339032009-09-04 12:35:16 -04001856 ring_buffer_record_disable(buffer);
1857
1858 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08001859 synchronize_rcu();
Steven Rostedt68179682012-05-08 20:57:53 -04001860 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001861
1862 ring_buffer_record_enable(buffer);
1863}
1864
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001865void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001866{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001867 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001868 int cpu;
1869
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001870 if (!buffer)
1871 return;
1872
Steven Rostedt621968c2009-09-04 12:02:35 -04001873 ring_buffer_record_disable(buffer);
1874
1875 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08001876 synchronize_rcu();
Steven Rostedt621968c2009-09-04 12:02:35 -04001877
Alexander Z Lam94571582013-08-02 18:36:16 -07001878 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001879
1880 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001881 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001882
1883 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001884}
1885
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001886/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001887void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001888{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001889 struct trace_array *tr;
1890
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001891 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (VMware)065e63f2017-08-31 17:03:47 -04001892 if (!tr->clear_trace)
1893 continue;
1894 tr->clear_trace = false;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001895 tracing_reset_online_cpus(&tr->trace_buffer);
1896#ifdef CONFIG_TRACER_MAX_TRACE
1897 tracing_reset_online_cpus(&tr->max_buffer);
1898#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001899 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001900}
1901
Joel Fernandesd914ba32017-06-26 19:01:55 -07001902static int *tgid_map;
1903
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001904#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001905#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001906static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001907struct saved_cmdlines_buffer {
1908 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1909 unsigned *map_cmdline_to_pid;
1910 unsigned cmdline_num;
1911 int cmdline_idx;
1912 char *saved_cmdlines;
1913};
1914static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001915
Steven Rostedt25b0b442008-05-12 21:21:00 +02001916/* temporary disable recording */
Joel Fernandesd914ba32017-06-26 19:01:55 -07001917static atomic_t trace_record_taskinfo_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001918
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001919static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001920{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001921 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1922}
1923
1924static inline void set_cmdline(int idx, const char *cmdline)
1925{
Tom Zanussi85f726a2019-03-05 10:12:00 -06001926 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001927}
1928
1929static int allocate_cmdlines_buffer(unsigned int val,
1930 struct saved_cmdlines_buffer *s)
1931{
Kees Cook6da2ec52018-06-12 13:55:00 -07001932 s->map_cmdline_to_pid = kmalloc_array(val,
1933 sizeof(*s->map_cmdline_to_pid),
1934 GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001935 if (!s->map_cmdline_to_pid)
1936 return -ENOMEM;
1937
Kees Cook6da2ec52018-06-12 13:55:00 -07001938 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001939 if (!s->saved_cmdlines) {
1940 kfree(s->map_cmdline_to_pid);
1941 return -ENOMEM;
1942 }
1943
1944 s->cmdline_idx = 0;
1945 s->cmdline_num = val;
1946 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1947 sizeof(s->map_pid_to_cmdline));
1948 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1949 val * sizeof(*s->map_cmdline_to_pid));
1950
1951 return 0;
1952}
1953
1954static int trace_create_savedcmd(void)
1955{
1956 int ret;
1957
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001958 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001959 if (!savedcmd)
1960 return -ENOMEM;
1961
1962 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1963 if (ret < 0) {
1964 kfree(savedcmd);
1965 savedcmd = NULL;
1966 return -ENOMEM;
1967 }
1968
1969 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001970}
1971
Carsten Emdeb5130b12009-09-13 01:43:07 +02001972int is_tracing_stopped(void)
1973{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001974 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001975}
1976
Steven Rostedt0f048702008-11-05 16:05:44 -05001977/**
1978 * tracing_start - quick start of the tracer
1979 *
1980 * If tracing is enabled but was stopped by tracing_stop,
1981 * this will start the tracer back up.
1982 */
1983void tracing_start(void)
1984{
1985 struct ring_buffer *buffer;
1986 unsigned long flags;
1987
1988 if (tracing_disabled)
1989 return;
1990
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001991 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1992 if (--global_trace.stop_count) {
1993 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001994 /* Someone screwed up their debugging */
1995 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001996 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001997 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001998 goto out;
1999 }
2000
Steven Rostedta2f80712010-03-12 19:56:00 -05002001 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002002 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05002003
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002004 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002005 if (buffer)
2006 ring_buffer_record_enable(buffer);
2007
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002008#ifdef CONFIG_TRACER_MAX_TRACE
2009 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002010 if (buffer)
2011 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002012#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002013
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002014 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002015
Steven Rostedt0f048702008-11-05 16:05:44 -05002016 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002017 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2018}
2019
2020static void tracing_start_tr(struct trace_array *tr)
2021{
2022 struct ring_buffer *buffer;
2023 unsigned long flags;
2024
2025 if (tracing_disabled)
2026 return;
2027
2028 /* If global, we need to also start the max tracer */
2029 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2030 return tracing_start();
2031
2032 raw_spin_lock_irqsave(&tr->start_lock, flags);
2033
2034 if (--tr->stop_count) {
2035 if (tr->stop_count < 0) {
2036 /* Someone screwed up their debugging */
2037 WARN_ON_ONCE(1);
2038 tr->stop_count = 0;
2039 }
2040 goto out;
2041 }
2042
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002043 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002044 if (buffer)
2045 ring_buffer_record_enable(buffer);
2046
2047 out:
2048 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002049}
2050
2051/**
2052 * tracing_stop - quick stop of the tracer
2053 *
2054 * Light weight way to stop tracing. Use in conjunction with
2055 * tracing_start.
2056 */
2057void tracing_stop(void)
2058{
2059 struct ring_buffer *buffer;
2060 unsigned long flags;
2061
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002062 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2063 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05002064 goto out;
2065
Steven Rostedta2f80712010-03-12 19:56:00 -05002066 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002067 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002068
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002069 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002070 if (buffer)
2071 ring_buffer_record_disable(buffer);
2072
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002073#ifdef CONFIG_TRACER_MAX_TRACE
2074 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002075 if (buffer)
2076 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002077#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002078
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002079 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002080
Steven Rostedt0f048702008-11-05 16:05:44 -05002081 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002082 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2083}
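
/*
 * Example pairing (illustrative): a debugging aid can freeze the
 * buffers with tracing_stop(), read them out, and then resume with
 * tracing_start(); the stop_count nesting makes this safe even if
 * tracing was already stopped.
 */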
2084
2085static void tracing_stop_tr(struct trace_array *tr)
2086{
2087 struct ring_buffer *buffer;
2088 unsigned long flags;
2089
2090 /* If global, we need to also stop the max tracer */
2091 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2092 return tracing_stop();
2093
2094 raw_spin_lock_irqsave(&tr->start_lock, flags);
2095 if (tr->stop_count++)
2096 goto out;
2097
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002098 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002099 if (buffer)
2100 ring_buffer_record_disable(buffer);
2101
2102 out:
2103 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002104}
2105
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002106static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002107{
Carsten Emdea635cf02009-03-18 09:00:41 +01002108 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002109
Joel Fernandeseaf260a2017-07-06 16:00:21 -07002110 /* treat recording of idle task as a success */
2111 if (!tsk->pid)
2112 return 1;
2113
2114 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002115 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002116
2117 /*
2118 * It's not the end of the world if we don't get
2119 * the lock, but we also don't want to spin
2120 * nor do we want to disable interrupts,
2121 * so if we miss here, then better luck next time.
2122 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002123 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002124 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002125
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002126 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002127 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002128 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002129
Carsten Emdea635cf02009-03-18 09:00:41 +01002130 /*
2131 * Check whether the cmdline buffer at idx has a pid
2132 * mapped. We are going to overwrite that entry so we
2133 * need to clear the map_pid_to_cmdline. Otherwise we
2134 * would read the new comm for the old pid.
2135 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002136 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01002137 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002138 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002139
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002140 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2141 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002142
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002143 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002144 }
2145
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002146 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002147
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002148 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002149
2150 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002151}
2152
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002153static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002154{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002155 unsigned map;
2156
Steven Rostedt4ca530852009-03-16 19:20:15 -04002157 if (!pid) {
2158 strcpy(comm, "<idle>");
2159 return;
2160 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002161
Steven Rostedt74bf4072010-01-25 15:11:53 -05002162 if (WARN_ON_ONCE(pid < 0)) {
2163 strcpy(comm, "<XXX>");
2164 return;
2165 }
2166
Steven Rostedt4ca530852009-03-16 19:20:15 -04002167 if (pid > PID_MAX_DEFAULT) {
2168 strcpy(comm, "<...>");
2169 return;
2170 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002171
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002172 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01002173 if (map != NO_CMDLINE_MAP)
Amey Telawanee09e2862017-05-03 15:41:14 +05302174 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
Thomas Gleixner50d88752009-03-18 08:58:44 +01002175 else
2176 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002177}
2178
2179void trace_find_cmdline(int pid, char comm[])
2180{
2181 preempt_disable();
2182 arch_spin_lock(&trace_cmdline_lock);
2183
2184 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002185
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002186 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02002187 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002188}
2189
Joel Fernandesd914ba32017-06-26 19:01:55 -07002190int trace_find_tgid(int pid)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002191{
Joel Fernandesd914ba32017-06-26 19:01:55 -07002192 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2193 return 0;
2194
2195 return tgid_map[pid];
2196}
2197
2198static int trace_save_tgid(struct task_struct *tsk)
2199{
Joel Fernandesbd45d342017-07-06 16:00:22 -07002200 /* treat recording of idle task as a success */
2201 if (!tsk->pid)
2202 return 1;
2203
2204 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
Joel Fernandesd914ba32017-06-26 19:01:55 -07002205 return 0;
2206
2207 tgid_map[tsk->pid] = tsk->tgid;
2208 return 1;
2209}
2210
2211static bool tracing_record_taskinfo_skip(int flags)
2212{
2213 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2214 return true;
2215 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2216 return true;
2217 if (!__this_cpu_read(trace_taskinfo_save))
2218 return true;
2219 return false;
2220}
2221
2222/**
2223 * tracing_record_taskinfo - record the task info of a task
2224 *
2225 * @task: task to record
2226 * @flags: TRACE_RECORD_CMDLINE for recording comm
2227 *         TRACE_RECORD_TGID for recording tgid
2228 */
2229void tracing_record_taskinfo(struct task_struct *task, int flags)
2230{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002231 bool done;
2232
Joel Fernandesd914ba32017-06-26 19:01:55 -07002233 if (tracing_record_taskinfo_skip(flags))
2234 return;
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002235
2236 /*
2237 * Record as much task information as possible. If some fail, continue
2238 * to try to record the others.
2239 */
2240 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2241 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2242
2243 /* If recording any information failed, retry again soon. */
2244 if (!done)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002245 return;
2246
Joel Fernandesd914ba32017-06-26 19:01:55 -07002247 __this_cpu_write(trace_taskinfo_save, false);
2248}
2249
2250/**
2251 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2252 *
2253 * @prev: previous task during sched_switch
2254 * @next: next task during sched_switch
2255 * @flags: TRACE_RECORD_CMDLINE for recording comm
2256 * TRACE_RECORD_TGID for recording tgid
2257 */
2258void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2259 struct task_struct *next, int flags)
2260{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002261 bool done;
2262
Joel Fernandesd914ba32017-06-26 19:01:55 -07002263 if (tracing_record_taskinfo_skip(flags))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002264 return;
2265
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002266 /*
2267 * Record as much task information as possible. If some fail, continue
2268 * to try to record the others.
2269 */
2270 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2271 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2272 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2273 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
Joel Fernandesd914ba32017-06-26 19:01:55 -07002274
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002275 /* If recording any information failed, retry again soon. */
2276 if (!done)
Joel Fernandesd914ba32017-06-26 19:01:55 -07002277 return;
2278
2279 __this_cpu_write(trace_taskinfo_save, false);
2280}
2281
2282/* Helpers to record a specific task information */
2283void tracing_record_cmdline(struct task_struct *task)
2284{
2285 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2286}
2287
2288void tracing_record_tgid(struct task_struct *task)
2289{
2290 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002291}
2292
Steven Rostedt (VMware)af0009f2017-03-16 11:01:06 -04002293/*
2294 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2295 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2296 * simplifies those functions and keeps them in sync.
2297 */
2298enum print_line_t trace_handle_return(struct trace_seq *s)
2299{
2300 return trace_seq_has_overflowed(s) ?
2301 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2302}
2303EXPORT_SYMBOL_GPL(trace_handle_return);
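
/*
 * Typical use in an event's output callback (illustrative):
 *
 *	trace_seq_printf(&iter->seq, "%d\n", field);
 *	return trace_handle_return(&iter->seq);
 */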
2304
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03002305void
Steven Rostedt38697052008-10-01 13:14:09 -04002306tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2307 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002308{
2309 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002310
Steven Rostedt777e2082008-09-29 23:02:42 -04002311 entry->preempt_count = pc & 0xff;
2312 entry->pid = (tsk) ? tsk->pid : 0;
2313 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04002314#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04002315 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04002316#else
2317 TRACE_FLAG_IRQS_NOSUPPORT |
2318#endif
Peter Zijlstra7e6867b2016-03-18 16:28:04 +01002319 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002320 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
Pavankumar Kondetic59f29c2016-12-09 21:50:17 +05302321 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02002322 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2323 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002324}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02002325EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002326
Steven Rostedte77405a2009-09-02 14:17:06 -04002327struct ring_buffer_event *
2328trace_buffer_lock_reserve(struct ring_buffer *buffer,
2329 int type,
2330 unsigned long len,
2331 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002332{
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002333 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002334}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002335
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002336DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2337DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2338static int trace_buffered_event_ref;
2339
2340/**
2341 * trace_buffered_event_enable - enable buffering events
2342 *
2343 * When events are being filtered, it is quicker to write the event
2344 * data into a temporary buffer when there is a good chance that
2345 * the event will not be committed. Discarding an event from the
2346 * ring buffer is not as fast as committing one, and is much slower
2347 * than copying the data and committing it in one shot.
2348 *
2349 * When an event is to be filtered, allocate per cpu buffers to
2350 * write the event data into, and if the event is filtered and discarded
2351 * it is simply dropped, otherwise, the entire data is to be committed
2352 * in one shot.
2353 */
2354void trace_buffered_event_enable(void)
2355{
2356 struct ring_buffer_event *event;
2357 struct page *page;
2358 int cpu;
2359
2360 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2361
2362 if (trace_buffered_event_ref++)
2363 return;
2364
2365 for_each_tracing_cpu(cpu) {
2366 page = alloc_pages_node(cpu_to_node(cpu),
2367 GFP_KERNEL | __GFP_NORETRY, 0);
2368 if (!page)
2369 goto failed;
2370
2371 event = page_address(page);
2372 memset(event, 0, sizeof(*event));
2373
2374 per_cpu(trace_buffered_event, cpu) = event;
2375
2376 preempt_disable();
2377 if (cpu == smp_processor_id() &&
2378 this_cpu_read(trace_buffered_event) !=
2379 per_cpu(trace_buffered_event, cpu))
2380 WARN_ON_ONCE(1);
2381 preempt_enable();
2382 }
2383
2384 return;
2385 failed:
2386 trace_buffered_event_disable();
2387}
2388
2389static void enable_trace_buffered_event(void *data)
2390{
2391 /* Probably not needed, but do it anyway */
2392 smp_rmb();
2393 this_cpu_dec(trace_buffered_event_cnt);
2394}
2395
2396static void disable_trace_buffered_event(void *data)
2397{
2398 this_cpu_inc(trace_buffered_event_cnt);
2399}
2400
2401/**
2402 * trace_buffered_event_disable - disable buffering events
2403 *
2404 * When a filter is removed, it is faster to not use the buffered
2405 * events, and to commit directly into the ring buffer. Free up
2406 * the temp buffers when there are no more users. This requires
2407 * special synchronization with current events.
2408 */
2409void trace_buffered_event_disable(void)
2410{
2411 int cpu;
2412
2413 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2414
2415 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2416 return;
2417
2418 if (--trace_buffered_event_ref)
2419 return;
2420
2421 preempt_disable();
2422 /* For each CPU, set the buffer as used. */
2423 smp_call_function_many(tracing_buffer_mask,
2424 disable_trace_buffered_event, NULL, 1);
2425 preempt_enable();
2426
2427 /* Wait for all current users to finish */
Paul E. McKenney74401722018-11-06 18:44:52 -08002428 synchronize_rcu();
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002429
2430 for_each_tracing_cpu(cpu) {
2431 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2432 per_cpu(trace_buffered_event, cpu) = NULL;
2433 }
2434 /*
2435 * Make sure trace_buffered_event is NULL before clearing
2436 * trace_buffered_event_cnt.
2437 */
2438 smp_wmb();
2439
2440 preempt_disable();
2441 /* Do the work on each cpu */
2442 smp_call_function_many(tracing_buffer_mask,
2443 enable_trace_buffered_event, NULL, 1);
2444 preempt_enable();
2445}
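
/*
 * Note: enable/disable are refcounted (trace_buffered_event_ref), so
 * several filters can share the per-cpu buffers; the pages are only
 * freed when the last user calls trace_buffered_event_disable().
 */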
2446
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002447static struct ring_buffer *temp_buffer;
2448
Steven Rostedtef5580d2009-02-27 19:38:04 -05002449struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04002450trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002451 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002452 int type, unsigned long len,
2453 unsigned long flags, int pc)
2454{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002455 struct ring_buffer_event *entry;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002456 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002457
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002458 *current_rb = trace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002459
Tom Zanussi00b41452018-01-15 20:51:39 -06002460 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002461 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2462 (entry = this_cpu_read(trace_buffered_event))) {
2463 /* Try to use the per cpu buffer first */
2464 val = this_cpu_inc_return(trace_buffered_event_cnt);
2465 if (val == 1) {
2466 trace_event_setup(entry, type, flags, pc);
2467 entry->array[0] = len;
2468 return entry;
2469 }
2470 this_cpu_dec(trace_buffered_event_cnt);
2471 }
2472
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002473 entry = __trace_buffer_lock_reserve(*current_rb,
2474 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002475 /*
2476 * If tracing is off, but we have triggers enabled
2477 * we still need to look at the event data. Use the temp_buffer
2478	 * to store the trace event for the trigger to use. It's recursion
2479	 * safe and will not be recorded anywhere.
2480 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002481 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002482 *current_rb = temp_buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002483 entry = __trace_buffer_lock_reserve(*current_rb,
2484 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002485 }
2486 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002487}
2488EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2489
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002490static DEFINE_SPINLOCK(tracepoint_iter_lock);
2491static DEFINE_MUTEX(tracepoint_printk_mutex);
2492
2493static void output_printk(struct trace_event_buffer *fbuffer)
2494{
2495 struct trace_event_call *event_call;
2496 struct trace_event *event;
2497 unsigned long flags;
2498 struct trace_iterator *iter = tracepoint_print_iter;
2499
2500 /* We should never get here if iter is NULL */
2501 if (WARN_ON_ONCE(!iter))
2502 return;
2503
2504 event_call = fbuffer->trace_file->event_call;
2505 if (!event_call || !event_call->event.funcs ||
2506 !event_call->event.funcs->trace)
2507 return;
2508
2509 event = &fbuffer->trace_file->event_call->event;
2510
2511 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2512 trace_seq_init(&iter->seq);
2513 iter->ent = fbuffer->entry;
2514 event_call->event.funcs->trace(iter, 0, event);
2515 trace_seq_putc(&iter->seq, 0);
2516 printk("%s", iter->seq.buffer);
2517
2518 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2519}
2520
2521int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2522 void __user *buffer, size_t *lenp,
2523 loff_t *ppos)
2524{
2525 int save_tracepoint_printk;
2526 int ret;
2527
2528 mutex_lock(&tracepoint_printk_mutex);
2529 save_tracepoint_printk = tracepoint_printk;
2530
2531 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2532
2533 /*
2534	 * This will force an early exit, as tracepoint_printk
2535	 * is always zero when tracepoint_print_iter is not allocated.
2536 */
2537 if (!tracepoint_print_iter)
2538 tracepoint_printk = 0;
2539
2540 if (save_tracepoint_printk == tracepoint_printk)
2541 goto out;
2542
2543 if (tracepoint_printk)
2544 static_key_enable(&tracepoint_printk_key.key);
2545 else
2546 static_key_disable(&tracepoint_printk_key.key);
2547
2548 out:
2549 mutex_unlock(&tracepoint_printk_mutex);
2550
2551 return ret;
2552}
2553
2554void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2555{
2556 if (static_key_false(&tracepoint_printk_key.key))
2557 output_printk(fbuffer);
2558
2559 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2560 fbuffer->event, fbuffer->entry,
2561 fbuffer->flags, fbuffer->pc);
2562}
2563EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2564
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002565/*
2566 * Skip 3:
2567 *
2568 * trace_buffer_unlock_commit_regs()
2569 * trace_event_buffer_commit()
2570 * trace_event_raw_event_xxx()
Rohit Visavalia13cf9122018-01-29 15:11:26 +05302571 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002572# define STACK_SKIP 3
2573
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002574void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2575 struct ring_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002576 struct ring_buffer_event *event,
2577 unsigned long flags, int pc,
2578 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002579{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002580 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002581
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002582 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002583 * If regs is not set, then skip the necessary functions.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002584 * Note, we can still get here via blktrace, wakeup tracer
2585 * and mmiotrace, but that's ok if they lose a function or
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002586 * two. They are not that meaningful.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002587 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002588 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002589 ftrace_trace_userstack(buffer, flags, pc);
2590}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002591
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002592/*
2593 * Similar to trace_buffer_unlock_commit_regs() but does not dump the stack.
2594 */
2595void
2596trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2597 struct ring_buffer_event *event)
2598{
2599 __buffer_unlock_commit(buffer, event);
2600}
2601
Chunyan Zhang478409d2016-11-21 15:57:18 +08002602static void
2603trace_process_export(struct trace_export *export,
2604 struct ring_buffer_event *event)
2605{
2606 struct trace_entry *entry;
2607 unsigned int size = 0;
2608
2609 entry = ring_buffer_event_data(event);
2610 size = ring_buffer_event_length(event);
Felipe Balbia773d412017-06-02 13:20:25 +03002611 export->write(export, entry, size);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002612}
2613
2614static DEFINE_MUTEX(ftrace_export_lock);
2615
2616static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2617
2618static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2619
2620static inline void ftrace_exports_enable(void)
2621{
2622 static_branch_enable(&ftrace_exports_enabled);
2623}
2624
2625static inline void ftrace_exports_disable(void)
2626{
2627 static_branch_disable(&ftrace_exports_enabled);
2628}
2629
Mathieu Malaterre1cce3772018-05-16 21:30:12 +02002630static void ftrace_exports(struct ring_buffer_event *event)
Chunyan Zhang478409d2016-11-21 15:57:18 +08002631{
2632 struct trace_export *export;
2633
2634 preempt_disable_notrace();
2635
2636 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2637 while (export) {
2638 trace_process_export(export, event);
2639 export = rcu_dereference_raw_notrace(export->next);
2640 }
2641
2642 preempt_enable_notrace();
2643}
2644
2645static inline void
2646add_trace_export(struct trace_export **list, struct trace_export *export)
2647{
2648 rcu_assign_pointer(export->next, *list);
2649 /*
2650	 * We are inserting export into the list, but another
2651	 * CPU might be walking that list. We need to make sure
2652	 * the export->next pointer is valid before another CPU sees
2653	 * the export pointer in the list.
2654 */
2655 rcu_assign_pointer(*list, export);
2656}
2657
2658static inline int
2659rm_trace_export(struct trace_export **list, struct trace_export *export)
2660{
2661 struct trace_export **p;
2662
2663 for (p = list; *p != NULL; p = &(*p)->next)
2664 if (*p == export)
2665 break;
2666
2667 if (*p != export)
2668 return -1;
2669
2670 rcu_assign_pointer(*p, (*p)->next);
2671
2672 return 0;
2673}
2674
2675static inline void
2676add_ftrace_export(struct trace_export **list, struct trace_export *export)
2677{
2678 if (*list == NULL)
2679 ftrace_exports_enable();
2680
2681 add_trace_export(list, export);
2682}
2683
2684static inline int
2685rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2686{
2687 int ret;
2688
2689 ret = rm_trace_export(list, export);
2690 if (*list == NULL)
2691 ftrace_exports_disable();
2692
2693 return ret;
2694}
2695
2696int register_ftrace_export(struct trace_export *export)
2697{
2698 if (WARN_ON_ONCE(!export->write))
2699 return -1;
2700
2701 mutex_lock(&ftrace_export_lock);
2702
2703 add_ftrace_export(&ftrace_exports_list, export);
2704
2705 mutex_unlock(&ftrace_export_lock);
2706
2707 return 0;
2708}
2709EXPORT_SYMBOL_GPL(register_ftrace_export);
2710
2711int unregister_ftrace_export(struct trace_export *export)
2712{
2713 int ret;
2714
2715 mutex_lock(&ftrace_export_lock);
2716
2717 ret = rm_ftrace_export(&ftrace_exports_list, export);
2718
2719 mutex_unlock(&ftrace_export_lock);
2720
2721 return ret;
2722}
2723EXPORT_SYMBOL_GPL(unregister_ftrace_export);
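
/*
 * Sketch of a trace_export consumer (hypothetical names). The write()
 * callback receives the raw binary trace entry and its size.
 */
#if 0
static void example_export_write(struct trace_export *export,
				 const void *entry, unsigned int size)
{
	/* forward the raw entry to some out-of-band transport */
}

static struct trace_export example_export = {
	.write	= example_export_write,
};

/* register_ftrace_export(&example_export); */
/* ... */
/* unregister_ftrace_export(&example_export); */
#endif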
2724
Ingo Molnare309b412008-05-12 21:20:51 +02002725void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002726trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04002727 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2728 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002729{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002730 struct trace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002731 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002732 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002733 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002734
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002735 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2736 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002737 if (!event)
2738 return;
2739 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002740 entry->ip = ip;
2741 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002742
Chunyan Zhang478409d2016-11-21 15:57:18 +08002743 if (!call_filter_check_discard(call, entry, buffer, event)) {
2744 if (static_branch_unlikely(&ftrace_exports_enabled))
2745 ftrace_exports(event);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002746 __buffer_unlock_commit(buffer, event);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002747 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002748}
2749
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002750#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002751
2752#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2753struct ftrace_stack {
2754 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2755};
2756
2757static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2758static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2759
Steven Rostedte77405a2009-09-02 14:17:06 -04002760static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05002761 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002762 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02002763{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002764 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002765 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002766 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02002767 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002768 int use_stack;
2769 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02002770
2771 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02002772 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02002773
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002774 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002775	 * Add one, for this function and the call to save_stack_trace().
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002776 * If regs is set, then these functions will not be in the way.
2777 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002778#ifndef CONFIG_UNWINDER_ORC
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002779 if (!regs)
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002780 trace.skip++;
2781#endif
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002782
2783 /*
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002784 * Since events can happen in NMIs there's no safe way to
2785 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2786 * or NMI comes in, it will just have to use the default
2787	 * FTRACE_STACK_ENTRIES.
2788 */
2789 preempt_disable_notrace();
2790
Shan Wei82146522012-11-19 13:21:01 +08002791 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002792 /*
2793 * We don't need any atomic variables, just a barrier.
2794 * If an interrupt comes in, we don't care, because it would
2795 * have exited and put the counter back to what we want.
2796 * We just need a barrier to keep gcc from moving things
2797 * around.
2798 */
2799 barrier();
2800 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05002801 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002802 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2803
2804 if (regs)
2805 save_stack_trace_regs(regs, &trace);
2806 else
2807 save_stack_trace(&trace);
2808
2809 if (trace.nr_entries > size)
2810 size = trace.nr_entries;
2811 } else
2812 /* From now on, use_stack is a boolean */
2813 use_stack = 0;
2814
2815 size *= sizeof(unsigned long);
2816
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002817 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2818 sizeof(*entry) + size, flags, pc);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002819 if (!event)
2820 goto out;
2821 entry = ring_buffer_event_data(event);
2822
2823 memset(&entry->caller, 0, size);
2824
2825 if (use_stack)
2826 memcpy(&entry->caller, trace.entries,
2827 trace.nr_entries * sizeof(unsigned long));
2828 else {
2829 trace.max_entries = FTRACE_STACK_ENTRIES;
2830 trace.entries = entry->caller;
2831 if (regs)
2832 save_stack_trace_regs(regs, &trace);
2833 else
2834 save_stack_trace(&trace);
2835 }
2836
2837 entry->size = trace.nr_entries;
2838
Tom Zanussif306cc82013-10-24 08:34:17 -05002839 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002840 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002841
2842 out:
2843 /* Again, don't let gcc optimize things here */
2844 barrier();
Shan Wei82146522012-11-19 13:21:01 +08002845 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002846 preempt_enable_notrace();
2847
Ingo Molnarf0a920d2008-05-12 21:20:47 +02002848}
2849
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002850static inline void ftrace_trace_stack(struct trace_array *tr,
2851 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002852 unsigned long flags,
2853 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05002854{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002855 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05002856 return;
2857
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002858 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05002859}
2860
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002861void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2862 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04002863{
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04002864 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2865
2866 if (rcu_is_watching()) {
2867 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2868 return;
2869 }
2870
2871 /*
2872 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2873 * but if the above rcu_is_watching() failed, then the NMI
2874 * triggered someplace critical, and rcu_irq_enter() should
2875 * not be called from NMI.
2876 */
2877 if (unlikely(in_nmi()))
2878 return;
2879
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04002880 rcu_irq_enter_irqson();
2881 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2882 rcu_irq_exit_irqson();
Steven Rostedt38697052008-10-01 13:14:09 -04002883}
2884
Steven Rostedt03889382009-12-11 09:48:22 -05002885/**
2886	 * trace_dump_stack - record a stack backtrace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002887 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05002888 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002889void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05002890{
2891 unsigned long flags;
2892
2893 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05002894 return;
Steven Rostedt03889382009-12-11 09:48:22 -05002895
2896 local_save_flags(flags);
2897
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002898#ifndef CONFIG_UNWINDER_ORC
2899 /* Skip 1 to skip this function. */
2900 skip++;
2901#endif
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04002902 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2903 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05002904}
Nikolay Borisovda387e52018-10-17 09:51:43 +03002905EXPORT_SYMBOL_GPL(trace_dump_stack);
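
/*
 * Illustrative sketch, not part of the original source: a debug site
 * can record how it was reached with
 *
 *	if (suspicious_condition)
 *		trace_dump_stack(0);
 *
 * where a positive @skip drops that many helper frames from the top
 * of the recorded trace. (suspicious_condition is a placeholder.)
 */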
Steven Rostedt03889382009-12-11 09:48:22 -05002906
Steven Rostedt91e86e52010-11-10 12:56:12 +01002907static DEFINE_PER_CPU(int, user_stack_count);
2908
Steven Rostedte77405a2009-09-02 14:17:06 -04002909void
2910ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02002911{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002912 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02002913 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02002914 struct userstack_entry *entry;
2915 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02002916
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002917 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02002918 return;
2919
Steven Rostedtb6345872010-03-12 20:03:30 -05002920 /*
2921 * NMIs can not handle page faults, even with fix ups.
2922 * The save user stack can (and often does) fault.
2923 */
2924 if (unlikely(in_nmi()))
2925 return;
2926
Steven Rostedt91e86e52010-11-10 12:56:12 +01002927 /*
2928 * prevent recursion, since the user stack tracing may
2929 * trigger other kernel events.
2930 */
2931 preempt_disable();
2932 if (__this_cpu_read(user_stack_count))
2933 goto out;
2934
2935 __this_cpu_inc(user_stack_count);
2936
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002937 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2938 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02002939 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08002940 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02002941 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02002942
Steven Rostedt48659d32009-09-11 11:36:23 -04002943 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02002944 memset(&entry->caller, 0, sizeof(entry->caller));
2945
2946 trace.nr_entries = 0;
2947 trace.max_entries = FTRACE_STACK_ENTRIES;
2948 trace.skip = 0;
2949 trace.entries = entry->caller;
2950
2951 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05002952 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002953 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002954
Li Zefan1dbd1952010-12-09 15:47:56 +08002955 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01002956 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01002957 out:
2958 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02002959}
2960
Hannes Eder4fd27352009-02-10 19:44:12 +01002961#ifdef UNUSED
2962static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02002963{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05002964 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02002965}
Hannes Eder4fd27352009-02-10 19:44:12 +01002966#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02002967
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002968#endif /* CONFIG_STACKTRACE */
2969
Steven Rostedt07d777f2011-09-22 14:01:55 -04002970/* created for use with alloc_percpu */
2971struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002972 int nesting;
2973 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04002974};
2975
2976static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002977
2978/*
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002979 * This allows for lockless recording. If we're nested too deeply, then
2980 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04002981 */
2982static char *get_trace_buf(void)
2983{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002984 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002985
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002986 if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002987 return NULL;
2988
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04002989 buffer->nesting++;
2990
2991 /* Interrupts must see nesting incremented before we use the buffer */
2992 barrier();
2993 return &buffer->buffer[buffer->nesting][0];
Andy Lutomirskie2ace002016-05-26 12:00:33 -07002994}
2995
2996static void put_trace_buf(void)
2997{
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04002998 /* Don't let the decrement of nesting leak before this */
2999 barrier();
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003000 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04003001}
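
/*
 * Illustrative sketch, not part of the original source: callers pair
 * the two helpers above with preemption disabled, the way
 * trace_vbprintk() below does:
 *
 *	preempt_disable_notrace();
 *	tbuffer = get_trace_buf();
 *	if (tbuffer) {
 *		len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
 *		... copy tbuffer into a ring buffer event ...
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 *
 * The four nesting levels cover normal, softirq, hardirq and NMI
 * context on a CPU, which is why no lock is needed.
 */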
3002
3003static int alloc_percpu_trace_buffer(void)
3004{
3005 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003006
3007 buffers = alloc_percpu(struct trace_buffer_struct);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003008 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
3009 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003010
3011 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003012 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003013}
3014
Steven Rostedt81698832012-10-11 10:15:05 -04003015static int buffers_allocated;
3016
Steven Rostedt07d777f2011-09-22 14:01:55 -04003017void trace_printk_init_buffers(void)
3018{
Steven Rostedt07d777f2011-09-22 14:01:55 -04003019 if (buffers_allocated)
3020 return;
3021
3022 if (alloc_percpu_trace_buffer())
3023 return;
3024
Steven Rostedt2184db42014-05-28 13:14:40 -04003025 /* trace_printk() is for debug use only. Don't use it in production. */
3026
Joe Perchesa395d6a2016-03-22 14:28:09 -07003027 pr_warn("\n");
3028 pr_warn("**********************************************************\n");
3029 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3030 pr_warn("** **\n");
3031 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3032 pr_warn("** **\n");
3033 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3034 pr_warn("** unsafe for production use. **\n");
3035 pr_warn("** **\n");
3036 pr_warn("** If you see this message and you are not debugging **\n");
3037 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3038 pr_warn("** **\n");
3039 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3040 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04003041
Steven Rostedtb382ede62012-10-10 21:44:34 -04003042	/* Expand the buffers to the configured size */
3043 tracing_update_buffers();
3044
Steven Rostedt07d777f2011-09-22 14:01:55 -04003045 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04003046
3047 /*
3048 * trace_printk_init_buffers() can be called by modules.
3049 * If that happens, then we need to start cmdline recording
3050 * directly here. If the global_trace.buffer is already
3051 * allocated here, then this was called by module code.
3052 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003053 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04003054 tracing_start_cmdline_record();
3055}
3056
3057void trace_printk_start_comm(void)
3058{
3059 /* Start tracing comms if trace printk is set */
3060 if (!buffers_allocated)
3061 return;
3062 tracing_start_cmdline_record();
3063}
3064
3065static void trace_printk_start_stop_comm(int enabled)
3066{
3067 if (!buffers_allocated)
3068 return;
3069
3070 if (enabled)
3071 tracing_start_cmdline_record();
3072 else
3073 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003074}
3075
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003076/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003077 * trace_vbprintk - write binary msg to tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003078 * @ip:   The address of the caller
 * @fmt:  The string format to write to the buffer
 * @args: Arguments for @fmt
3079 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04003080int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003081{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003082 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003083 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04003084 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003085 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003086 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003087 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003088 char *tbuffer;
3089 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003090
3091 if (unlikely(tracing_selftest_running || tracing_disabled))
3092 return 0;
3093
3094 /* Don't pollute graph traces with trace_vprintk internals */
3095 pause_graph_tracing();
3096
3097 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04003098 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003099
Steven Rostedt07d777f2011-09-22 14:01:55 -04003100 tbuffer = get_trace_buf();
3101 if (!tbuffer) {
3102 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003103 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003104 }
3105
3106 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3107
3108 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003109 goto out;
3110
Steven Rostedt07d777f2011-09-22 14:01:55 -04003111 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003112 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003113 buffer = tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003114 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3115 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003116 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003117 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003118 entry = ring_buffer_event_data(event);
3119 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003120 entry->fmt = fmt;
3121
Steven Rostedt07d777f2011-09-22 14:01:55 -04003122 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05003123 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003124 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003125 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003126 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003127
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003128out:
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003129 put_trace_buf();
3130
3131out_nobuffer:
Steven Rostedt5168ae52010-06-03 09:36:50 -04003132 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003133 unpause_graph_tracing();
3134
3135 return len;
3136}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003137EXPORT_SYMBOL_GPL(trace_vbprintk);
3138
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003139__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003140static int
3141__trace_array_vprintk(struct ring_buffer *buffer,
3142 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003143{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003144 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003145 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003146 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003147 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003148 unsigned long flags;
3149 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003150
3151 if (tracing_disabled || tracing_selftest_running)
3152 return 0;
3153
Steven Rostedt07d777f2011-09-22 14:01:55 -04003154 /* Don't pollute graph traces with trace_vprintk internals */
3155 pause_graph_tracing();
3156
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003157 pc = preempt_count();
3158 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003159
3161 tbuffer = get_trace_buf();
3162 if (!tbuffer) {
3163 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003164 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003165 }
3166
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003167 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003168
Steven Rostedt07d777f2011-09-22 14:01:55 -04003169 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003170 size = sizeof(*entry) + len + 1;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003171 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3172 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003173 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003174 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003175 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01003176 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003177
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003178 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05003179 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003180 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003181 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003182 }
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003183
3184out:
3185 put_trace_buf();
3186
3187out_nobuffer:
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003188 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003189 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003190
3191 return len;
3192}
Steven Rostedt659372d2009-09-03 19:11:07 -04003193
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003194__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003195int trace_array_vprintk(struct trace_array *tr,
3196 unsigned long ip, const char *fmt, va_list args)
3197{
3198 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3199}
3200
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003201__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003202int trace_array_printk(struct trace_array *tr,
3203 unsigned long ip, const char *fmt, ...)
3204{
3205 int ret;
3206 va_list ap;
3207
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003208 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003209 return 0;
3210
3211 va_start(ap, fmt);
3212 ret = trace_array_vprintk(tr, ip, fmt, ap);
3213 va_end(ap);
3214 return ret;
3215}
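
/*
 * Illustrative sketch, not part of the original source: assuming @tr
 * refers to an existing trace instance, a message can be directed at
 * that instance's buffer instead of the global one:
 *
 *	trace_array_printk(tr, _THIS_IP_, "stage %d done\n", stage);
 *
 * Note that the TRACE_ITER_PRINTK check above makes this a nop unless
 * the global printk trace option is enabled.
 */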
3216
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003217__printf(3, 4)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003218int trace_array_printk_buf(struct ring_buffer *buffer,
3219 unsigned long ip, const char *fmt, ...)
3220{
3221 int ret;
3222 va_list ap;
3223
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003224 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003225 return 0;
3226
3227 va_start(ap, fmt);
3228 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3229 va_end(ap);
3230 return ret;
3231}
3232
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003233__printf(2, 0)
Steven Rostedt659372d2009-09-03 19:11:07 -04003234int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3235{
Steven Rostedta813a152009-10-09 01:41:35 -04003236 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04003237}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003238EXPORT_SYMBOL_GPL(trace_vprintk);
3239
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003240static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04003241{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003242 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3243
Steven Rostedt5a90f572008-09-03 17:42:51 -04003244 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003245 if (buf_iter)
3246 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04003247}
3248
Ingo Molnare309b412008-05-12 21:20:51 +02003249static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003250peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3251 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003252{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003253 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003254 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003255
Steven Rostedtd7690412008-10-01 00:29:53 -04003256 if (buf_iter)
3257 event = ring_buffer_iter_peek(buf_iter, ts);
3258 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003259 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003260 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04003261
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003262 if (event) {
3263 iter->ent_size = ring_buffer_event_length(event);
3264 return ring_buffer_event_data(event);
3265 }
3266 iter->ent_size = 0;
3267 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003268}
Steven Rostedtd7690412008-10-01 00:29:53 -04003269
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003270static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003271__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3272 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003273{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003274 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003275 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08003276 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003277 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003278 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003279 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003280 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003281 int cpu;
3282
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003283 /*
3284	 * If we are in a per_cpu trace file, don't bother iterating over
3285	 * all CPUs; peek directly at that one.
3286 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003287 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003288 if (ring_buffer_empty_cpu(buffer, cpu_file))
3289 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003290 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003291 if (ent_cpu)
3292 *ent_cpu = cpu_file;
3293
3294 return ent;
3295 }
3296
Steven Rostedtab464282008-05-12 21:21:00 +02003297 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003298
3299 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003300 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003301
Steven Rostedtbc21b472010-03-31 19:49:26 -04003302 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003303
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02003304 /*
3305 * Pick the entry with the smallest timestamp:
3306 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003307 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003308 next = ent;
3309 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003310 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003311 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003312 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003313 }
3314 }
3315
Steven Rostedt12b5da32012-03-27 10:43:28 -04003316 iter->ent_size = next_size;
3317
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003318 if (ent_cpu)
3319 *ent_cpu = next_cpu;
3320
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003321 if (ent_ts)
3322 *ent_ts = next_ts;
3323
Steven Rostedtbc21b472010-03-31 19:49:26 -04003324 if (missing_events)
3325 *missing_events = next_lost;
3326
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003327 return next;
3328}
3329
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003330/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003331struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3332 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003333{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003334 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003335}
Ingo Molnar8c523a92008-05-12 21:20:46 +02003336
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003337/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05003338void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003339{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003340 iter->ent = __find_next_entry(iter, &iter->cpu,
3341 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003342
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003343 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003344 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003345
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003346 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003347}
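
/*
 * Illustrative sketch, not part of the original source: readers walk
 * the merged, timestamp-ordered stream with
 *
 *	while (trace_find_next_entry_inc(iter))
 *		print_trace_line(iter);
 *
 * each call advancing iter->ent, iter->cpu and iter->ts to the oldest
 * remaining event across all per-cpu buffers.
 */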
3348
Ingo Molnare309b412008-05-12 21:20:51 +02003349static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003350{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003351 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003352 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003353}
3354
Ingo Molnare309b412008-05-12 21:20:51 +02003355static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003356{
3357 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003358 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003359 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003360
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003361 WARN_ON_ONCE(iter->leftover);
3362
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003363 (*pos)++;
3364
3365 /* can't go backwards */
3366 if (iter->idx > i)
3367 return NULL;
3368
3369 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05003370 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003371 else
3372 ent = iter;
3373
3374 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05003375 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003376
3377 iter->pos = *pos;
3378
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003379 return ent;
3380}
3381
Jason Wessel955b61e2010-08-05 09:22:23 -05003382void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003383{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003384 struct ring_buffer_event *event;
3385 struct ring_buffer_iter *buf_iter;
3386 unsigned long entries = 0;
3387 u64 ts;
3388
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003389 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003390
Steven Rostedt6d158a82012-06-27 20:46:14 -04003391 buf_iter = trace_buffer_iter(iter, cpu);
3392 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003393 return;
3394
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003395 ring_buffer_iter_reset(buf_iter);
3396
3397 /*
3398	 * With the max latency tracers, a reset may never have taken
3399	 * place on a cpu. This is evident from the timestamp being
3400	 * before the start of the buffer.
3401 */
3402 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003403 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003404 break;
3405 entries++;
3406 ring_buffer_read(buf_iter, NULL);
3407 }
3408
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003409 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003410}
3411
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003412/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003413 * The current tracer is copied to avoid taking a global lock
3414 * all around.
3415 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003416static void *s_start(struct seq_file *m, loff_t *pos)
3417{
3418 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003419 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003420 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003421 void *p = NULL;
3422 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003423 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003424
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09003425 /*
3426	 * Copy the tracer to avoid using a global lock all around.
3427	 * iter->trace is a copy of current_trace; the name pointer can
3428	 * be compared instead of calling strcmp(), as iter->trace->name
3429 * will point to the same string as current_trace->name.
3430 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003431 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003432 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3433 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003434 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003435
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003436#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003437 if (iter->snapshot && iter->trace->use_max_tr)
3438 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003439#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003440
3441 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003442 atomic_inc(&trace_record_taskinfo_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003443
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003444 if (*pos != iter->pos) {
3445 iter->ent = NULL;
3446 iter->cpu = 0;
3447 iter->idx = -1;
3448
Steven Rostedtae3b5092013-01-23 15:22:59 -05003449 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003450 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003451 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003452 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003453 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003454
Lai Jiangshanac91d852010-03-02 17:54:50 +08003455 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003456 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3457 ;
3458
3459 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003460 /*
3461 * If we overflowed the seq_file before, then we want
3462 * to just reuse the trace_seq buffer again.
3463 */
3464 if (iter->leftover)
3465 p = iter;
3466 else {
3467 l = *pos - 1;
3468 p = s_next(m, p, &l);
3469 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003470 }
3471
Lai Jiangshan4f535962009-05-18 19:35:34 +08003472 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003473 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003474 return p;
3475}
3476
3477static void s_stop(struct seq_file *m, void *p)
3478{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003479 struct trace_iterator *iter = m->private;
3480
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003481#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003482 if (iter->snapshot && iter->trace->use_max_tr)
3483 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003484#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003485
3486 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003487 atomic_dec(&trace_record_taskinfo_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003488
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003489 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08003490 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003491}
3492
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003493static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003494get_total_entries(struct trace_buffer *buf,
3495 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003496{
3497 unsigned long count;
3498 int cpu;
3499
3500 *total = 0;
3501 *entries = 0;
3502
3503 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003504 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003505 /*
3506 * If this buffer has skipped entries, then we hold all
3507 * entries for the trace and we need to ignore the
3508 * ones before the time stamp.
3509 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003510 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3511 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003512 /* total is the same as the entries */
3513 *total += count;
3514 } else
3515 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003516 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003517 *entries += count;
3518 }
3519}
3520
Ingo Molnare309b412008-05-12 21:20:51 +02003521static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003522{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003523 seq_puts(m, "# _------=> CPU# \n"
3524 "# / _-----=> irqs-off \n"
3525 "# | / _----=> need-resched \n"
3526 "# || / _---=> hardirq/softirq \n"
3527 "# ||| / _--=> preempt-depth \n"
3528 "# |||| / delay \n"
3529 "# cmd pid ||||| time | caller \n"
3530 "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003531}
3532
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003533static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003534{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003535 unsigned long total;
3536 unsigned long entries;
3537
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003538 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003539 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3540 entries, total, num_online_cpus());
3541 seq_puts(m, "#\n");
3542}
3543
Joel Fernandes441dae82017-06-25 22:38:43 -07003544static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3545 unsigned int flags)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003546{
Joel Fernandes441dae82017-06-25 22:38:43 -07003547 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3548
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003549 print_event_info(buf, m);
Joel Fernandes441dae82017-06-25 22:38:43 -07003550
Joel Fernandes (Google)f8494fa2018-06-25 17:08:22 -07003551 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3552 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003553}
3554
Joel Fernandes441dae82017-06-25 22:38:43 -07003555static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3556 unsigned int flags)
Steven Rostedt77271ce2011-11-17 09:34:33 -05003557{
Joel Fernandes441dae82017-06-25 22:38:43 -07003558 bool tgid = flags & TRACE_ITER_RECORD_TGID;
Steven Rostedt (VMware)b11fb732017-07-11 15:43:24 -04003559 const char tgid_space[] = " ";
3560 const char space[] = " ";
Joel Fernandes441dae82017-06-25 22:38:43 -07003561
Steven Rostedt (VMware)b11fb732017-07-11 15:43:24 -04003562 seq_printf(m, "# %s _-----=> irqs-off\n",
3563 tgid ? tgid_space : space);
3564 seq_printf(m, "# %s / _----=> need-resched\n",
3565 tgid ? tgid_space : space);
3566 seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
3567 tgid ? tgid_space : space);
3568 seq_printf(m, "# %s|| / _--=> preempt-depth\n",
3569 tgid ? tgid_space : space);
3570 seq_printf(m, "# %s||| / delay\n",
3571 tgid ? tgid_space : space);
Joel Fernandes (Google)f8494fa2018-06-25 17:08:22 -07003572 seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n",
Steven Rostedt (VMware)b11fb732017-07-11 15:43:24 -04003573 tgid ? " TGID " : space);
Joel Fernandes (Google)f8494fa2018-06-25 17:08:22 -07003574 seq_printf(m, "# | | %s | |||| | |\n",
Steven Rostedt (VMware)b11fb732017-07-11 15:43:24 -04003575 tgid ? " | " : space);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003576}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003577
Jiri Olsa62b915f2010-04-02 19:01:22 +02003578void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003579print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3580{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003581 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003582 struct trace_buffer *buf = iter->trace_buffer;
3583 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003584 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05003585 unsigned long entries;
3586 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003587 const char *name = "preemption";
3588
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05003589 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003590
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003591 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003592
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003593 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003594 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003595 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003596 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003597 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003598 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02003599 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003600 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02003601 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003602 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003603#if defined(CONFIG_PREEMPT_NONE)
3604 "server",
3605#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3606 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04003607#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003608 "preempt",
3609#else
3610 "unknown",
3611#endif
3612 /* These are reserved for later use */
3613 0, 0, 0, 0);
3614#ifdef CONFIG_SMP
3615 seq_printf(m, " #P:%d)\n", num_online_cpus());
3616#else
3617 seq_puts(m, ")\n");
3618#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003619 seq_puts(m, "# -----------------\n");
3620 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003621 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07003622 data->comm, data->pid,
3623 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003624 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003625 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003626
3627 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003628 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003629 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3630 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003631 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02003632 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3633 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04003634 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003635 }
3636
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09003637 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003638}
3639
Steven Rostedta3097202008-11-07 22:36:02 -05003640static void test_cpu_buff_start(struct trace_iterator *iter)
3641{
3642 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003643 struct trace_array *tr = iter->tr;
Steven Rostedta3097202008-11-07 22:36:02 -05003644
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003645 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003646 return;
3647
3648 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3649 return;
3650
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003651 if (cpumask_available(iter->started) &&
3652 cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05003653 return;
3654
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003655 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003656 return;
3657
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07003658 if (cpumask_available(iter->started))
Sasha Levin919cd972015-09-04 12:45:56 -04003659 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003660
3661 /* Don't print started cpu buffer for the first entry of the trace */
3662 if (iter->idx > 1)
3663 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3664 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05003665}
3666
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003667static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003668{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003669 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02003670 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003671 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003672 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003673 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003674
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003675 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003676
Steven Rostedta3097202008-11-07 22:36:02 -05003677 test_cpu_buff_start(iter);
3678
Steven Rostedtf633cef2008-12-23 23:24:13 -05003679 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003680
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003681 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003682 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3683 trace_print_lat_context(iter);
3684 else
3685 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003686 }
3687
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003688 if (trace_seq_has_overflowed(s))
3689 return TRACE_TYPE_PARTIAL_LINE;
3690
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003691 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003692 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003693
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003694 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003695
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003696 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003697}
3698
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003699static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003700{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003701 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003702 struct trace_seq *s = &iter->seq;
3703 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003704 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003705
3706 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003707
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003708 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003709 trace_seq_printf(s, "%d %d %llu ",
3710 entry->pid, iter->cpu, iter->ts);
3711
3712 if (trace_seq_has_overflowed(s))
3713 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003714
Steven Rostedtf633cef2008-12-23 23:24:13 -05003715 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003716 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04003717 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003718
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003719 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04003720
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003721 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003722}
3723
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003724static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003725{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003726 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003727 struct trace_seq *s = &iter->seq;
3728 unsigned char newline = '\n';
3729 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003730 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003731
3732 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003733
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003734 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003735 SEQ_PUT_HEX_FIELD(s, entry->pid);
3736 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3737 SEQ_PUT_HEX_FIELD(s, iter->ts);
3738 if (trace_seq_has_overflowed(s))
3739 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003740 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003741
Steven Rostedtf633cef2008-12-23 23:24:13 -05003742 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02003743 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04003744 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02003745 if (ret != TRACE_TYPE_HANDLED)
3746 return ret;
3747 }
Steven Rostedt7104f302008-10-01 10:52:51 -04003748
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003749 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003750
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003751 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003752}
3753
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003754static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003755{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003756 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003757 struct trace_seq *s = &iter->seq;
3758 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05003759 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003760
3761 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003762
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003763 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003764 SEQ_PUT_FIELD(s, entry->pid);
3765 SEQ_PUT_FIELD(s, iter->cpu);
3766 SEQ_PUT_FIELD(s, iter->ts);
3767 if (trace_seq_has_overflowed(s))
3768 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003769 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003770
Steven Rostedtf633cef2008-12-23 23:24:13 -05003771 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04003772 return event ? event->funcs->binary(iter, 0, event) :
3773 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003774}
3775
Jiri Olsa62b915f2010-04-02 19:01:22 +02003776int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003777{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003778 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003779 int cpu;
3780
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003781 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003782 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003783 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003784 buf_iter = trace_buffer_iter(iter, cpu);
3785 if (buf_iter) {
3786 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003787 return 0;
3788 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003789 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04003790 return 0;
3791 }
3792 return 1;
3793 }
3794
Steven Rostedtab464282008-05-12 21:21:00 +02003795 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04003796 buf_iter = trace_buffer_iter(iter, cpu);
3797 if (buf_iter) {
3798 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04003799 return 0;
3800 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003801 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04003802 return 0;
3803 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003804 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003805
Frederic Weisbecker797d3712008-09-30 18:13:45 +02003806 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003807}
3808
Lai Jiangshan4f535962009-05-18 19:35:34 +08003809/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05003810enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003811{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003812 struct trace_array *tr = iter->tr;
3813 unsigned long trace_flags = tr->trace_flags;
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003814 enum print_line_t ret;
3815
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05003816 if (iter->lost_events) {
3817 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3818 iter->cpu, iter->lost_events);
3819 if (trace_seq_has_overflowed(&iter->seq))
3820 return TRACE_TYPE_PARTIAL_LINE;
3821 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04003822
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02003823 if (iter->trace && iter->trace->print_line) {
3824 ret = iter->trace->print_line(iter);
3825 if (ret != TRACE_TYPE_UNHANDLED)
3826 return ret;
3827 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02003828
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05003829 if (iter->ent->type == TRACE_BPUTS &&
3830 trace_flags & TRACE_ITER_PRINTK &&
3831 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3832 return trace_print_bputs_msg_only(iter);
3833
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003834 if (iter->ent->type == TRACE_BPRINT &&
3835 trace_flags & TRACE_ITER_PRINTK &&
3836 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003837 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003838
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003839 if (iter->ent->type == TRACE_PRINT &&
3840 trace_flags & TRACE_ITER_PRINTK &&
3841 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04003842 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01003843
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02003844 if (trace_flags & TRACE_ITER_BIN)
3845 return print_bin_fmt(iter);
3846
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02003847 if (trace_flags & TRACE_ITER_HEX)
3848 return print_hex_fmt(iter);
3849
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003850 if (trace_flags & TRACE_ITER_RAW)
3851 return print_raw_fmt(iter);
3852
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003853 return print_trace_fmt(iter);
3854}
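/*
 * A minimal sketch of how a tracer can take over formatting via the
 * ->print_line() hook consulted above (TRACE_MY_TYPE and the function
 * name are illustrative, not existing kernel symbols). Returning
 * TRACE_TYPE_UNHANDLED lets unrecognized records fall through to the
 * generic bin/hex/raw/default handlers:
 *
 *	static enum print_line_t my_print_line(struct trace_iterator *iter)
 *	{
 *		if (iter->ent->type != TRACE_MY_TYPE)
 *			return TRACE_TYPE_UNHANDLED;
 *
 *		trace_seq_puts(&iter->seq, "my event\n");
 *		return trace_seq_has_overflowed(&iter->seq) ?
 *			TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
 *	}
 */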
3855
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003856void trace_latency_header(struct seq_file *m)
3857{
3858 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003859 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003860
3861 /* print nothing if the buffers are empty */
3862 if (trace_empty(iter))
3863 return;
3864
3865 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3866 print_trace_header(m, iter);
3867
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003868 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01003869 print_lat_help_header(m);
3870}
3871
Jiri Olsa62b915f2010-04-02 19:01:22 +02003872void trace_default_header(struct seq_file *m)
3873{
3874 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003875 struct trace_array *tr = iter->tr;
3876 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02003877
Jiri Olsaf56e7f82011-06-03 16:58:49 +02003878 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3879 return;
3880
Jiri Olsa62b915f2010-04-02 19:01:22 +02003881 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3882 /* print nothing if the buffers are empty */
3883 if (trace_empty(iter))
3884 return;
3885 print_trace_header(m, iter);
3886 if (!(trace_flags & TRACE_ITER_VERBOSE))
3887 print_lat_help_header(m);
3888 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05003889 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3890 if (trace_flags & TRACE_ITER_IRQ_INFO)
Joel Fernandes441dae82017-06-25 22:38:43 -07003891 print_func_help_header_irq(iter->trace_buffer,
3892 m, trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003893 else
Joel Fernandes441dae82017-06-25 22:38:43 -07003894 print_func_help_header(iter->trace_buffer, m,
3895 trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003896 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02003897 }
3898}
3899
Steven Rostedte0a413f2011-09-29 21:26:16 -04003900static void test_ftrace_alive(struct seq_file *m)
3901{
3902 if (!ftrace_is_dead())
3903 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003904 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3905 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04003906}
3907
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003908#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003909static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003910{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003911 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3912 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3913 "# Takes a snapshot of the main buffer.\n"
3914 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3915 "# (Doesn't have to be '2' works with any number that\n"
3916 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003917}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003918
3919static void show_snapshot_percpu_help(struct seq_file *m)
3920{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003921 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003922#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003923 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3924 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003925#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003926 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3927 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003928#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01003929 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3930 "# (Doesn't have to be '2' works with any number that\n"
3931 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003932}
3933
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003934static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3935{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05003936 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003937 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003938 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003939 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003940
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003941 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003942 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3943 show_snapshot_main_help(m);
3944 else
3945 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003946}
3947#else
3948/* Should never be called */
3949static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3950#endif
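/*
 * Illustrative shell session for the snapshot semantics documented in
 * the help text above (assumes tracefs mounted at /sys/kernel/tracing
 * and CONFIG_TRACER_MAX_TRACE=y):
 *
 *	echo 1 > /sys/kernel/tracing/snapshot   # allocate and take a snapshot
 *	cat /sys/kernel/tracing/snapshot        # read the frozen copy
 *	echo 2 > /sys/kernel/tracing/snapshot   # clear it, keep the allocation
 *	echo 0 > /sys/kernel/tracing/snapshot   # clear and free the buffer
 */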
3951
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003952static int s_show(struct seq_file *m, void *v)
3953{
3954 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003955 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003956
3957 if (iter->ent == NULL) {
3958 if (iter->tr) {
3959 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3960 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04003961 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003962 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05003963 if (iter->snapshot && trace_empty(iter))
3964 print_snapshot_help(m, iter);
3965 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003966 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02003967 else
3968 trace_default_header(m);
3969
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003970 } else if (iter->leftover) {
3971 /*
3972 * If we filled the seq_file buffer earlier, we
3973 * want to just show it now.
3974 */
3975 ret = trace_print_seq(m, &iter->seq);
3976
3977 /* ret should this time be zero, but you never know */
3978 iter->leftover = ret;
3979
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003980 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02003981 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003982 ret = trace_print_seq(m, &iter->seq);
3983 /*
3984 * If we overflow the seq_file buffer, then it will
3985 * ask us for this data again at start up.
3986 * Use that instead.
3987 * ret is 0 if seq_file write succeeded.
3988 * -1 otherwise.
3989 */
3990 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003991 }
3992
3993 return 0;
3994}
3995
Oleg Nesterov649e9c702013-07-23 17:25:54 +02003996/*
3997 * Should be used after trace_array_get(), trace_types_lock
3998 * ensures that i_cdev was already initialized.
3999 */
4000static inline int tracing_get_cpu(struct inode *inode)
4001{
4002 if (inode->i_cdev) /* See trace_create_cpu_file() */
4003 return (long)inode->i_cdev - 1;
4004 return RING_BUFFER_ALL_CPUS;
4005}
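/*
 * Sketch of the counterpart encoding done in trace_create_cpu_file():
 * per-cpu files stash "cpu + 1" in i_cdev so that a zero i_cdev can
 * keep meaning "no CPU stored", roughly:
 *
 *	d_inode(dentry)->i_cdev = (void *)(cpu + 1);
 */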
4006
James Morris88e9d342009-09-22 16:43:43 -07004007static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004008 .start = s_start,
4009 .next = s_next,
4010 .stop = s_stop,
4011 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004012};
4013
Ingo Molnare309b412008-05-12 21:20:51 +02004014static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02004015__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004016{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004017 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004018 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02004019 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004020
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004021 if (tracing_disabled)
4022 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02004023
Jiri Olsa50e18b92012-04-25 10:23:39 +02004024 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004025 if (!iter)
4026 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004027
Gil Fruchter72917232015-06-09 10:32:35 +03004028 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
Steven Rostedt6d158a82012-06-27 20:46:14 -04004029 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004030 if (!iter->buffer_iter)
4031 goto release;
4032
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004033 /*
4034 * We make a copy of the current tracer to avoid concurrent
4035 * changes on it while we are reading.
4036 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004037 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004038 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004039 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004040 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004041
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004042 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004043
Li Zefan79f55992009-06-15 14:58:26 +08004044 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004045 goto fail;
4046
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004047 iter->tr = tr;
4048
4049#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004050 /* Currently only the top directory has a snapshot */
4051 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004052 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004053 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004054#endif
4055 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004056 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004057 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02004058 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004059 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004060
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004061 /* Notify the tracer early; before we stop tracing. */
4062 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01004063 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004064
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004065 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004066 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004067 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4068
David Sharp8be07092012-11-13 12:18:22 -08004069 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004070 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004071 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4072
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004073 /* stop the trace while dumping if we are not opening "snapshot" */
4074 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004075 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004076
Steven Rostedtae3b5092013-01-23 15:22:59 -05004077 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004078 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004079 iter->buffer_iter[cpu] =
Douglas Anderson31b265b2019-03-08 11:32:04 -08004080 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4081 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004082 }
4083 ring_buffer_read_prepare_sync();
4084 for_each_tracing_cpu(cpu) {
4085 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004086 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004087 }
4088 } else {
4089 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004090 iter->buffer_iter[cpu] =
Douglas Anderson31b265b2019-03-08 11:32:04 -08004091 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4092 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004093 ring_buffer_read_prepare_sync();
4094 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004095 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004096 }
4097
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004098 mutex_unlock(&trace_types_lock);
4099
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004100 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004101
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004102 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004103 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004104 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004105 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004106release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02004107 seq_release_private(inode, file);
4108 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004109}
4110
4111int tracing_open_generic(struct inode *inode, struct file *filp)
4112{
Steven Rostedt60a11772008-05-12 21:20:44 +02004113 if (tracing_disabled)
4114 return -ENODEV;
4115
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004116 filp->private_data = inode->i_private;
4117 return 0;
4118}
4119
Geyslan G. Bem2e864212013-10-18 21:15:54 -03004120bool tracing_is_disabled(void)
4121{
 4122 return (tracing_disabled) ? true : false;
4123}
4124
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004125/*
4126 * Open and update trace_array ref count.
4127 * Must have the current trace_array passed to it.
4128 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04004129static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004130{
4131 struct trace_array *tr = inode->i_private;
4132
4133 if (tracing_disabled)
4134 return -ENODEV;
4135
4136 if (trace_array_get(tr) < 0)
4137 return -ENODEV;
4138
4139 filp->private_data = inode->i_private;
4140
4141 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004142}
4143
Hannes Eder4fd27352009-02-10 19:44:12 +01004144static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004145{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004146 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07004147 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004148 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004149 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004150
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004151 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004152 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004153 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004154 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004155
Oleg Nesterov6484c712013-07-23 17:26:10 +02004156 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004157 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004158 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05004159
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004160 for_each_tracing_cpu(cpu) {
4161 if (iter->buffer_iter[cpu])
4162 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4163 }
4164
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004165 if (iter->trace && iter->trace->close)
4166 iter->trace->close(iter);
4167
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004168 if (!iter->snapshot)
4169 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004170 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004171
4172 __trace_array_put(tr);
4173
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004174 mutex_unlock(&trace_types_lock);
4175
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004176 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004177 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004178 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004179 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02004180 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004181
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004182 return 0;
4183}
4184
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004185static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4186{
4187 struct trace_array *tr = inode->i_private;
4188
4189 trace_array_put(tr);
4190 return 0;
4191}
4192
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004193static int tracing_single_release_tr(struct inode *inode, struct file *file)
4194{
4195 struct trace_array *tr = inode->i_private;
4196
4197 trace_array_put(tr);
4198
4199 return single_release(inode, file);
4200}
4201
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004202static int tracing_open(struct inode *inode, struct file *file)
4203{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004204 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004205 struct trace_iterator *iter;
4206 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004207
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004208 if (trace_array_get(tr) < 0)
4209 return -ENODEV;
4210
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004211 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02004212 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4213 int cpu = tracing_get_cpu(inode);
Bo Yan8dd33bc2017-09-18 10:03:35 -07004214 struct trace_buffer *trace_buf = &tr->trace_buffer;
4215
4216#ifdef CONFIG_TRACER_MAX_TRACE
4217 if (tr->current_trace->print_max)
4218 trace_buf = &tr->max_buffer;
4219#endif
Oleg Nesterov6484c712013-07-23 17:26:10 +02004220
4221 if (cpu == RING_BUFFER_ALL_CPUS)
Bo Yan8dd33bc2017-09-18 10:03:35 -07004222 tracing_reset_online_cpus(trace_buf);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004223 else
Bo Yan8dd33bc2017-09-18 10:03:35 -07004224 tracing_reset(trace_buf, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004225 }
4226
4227 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004228 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004229 if (IS_ERR(iter))
4230 ret = PTR_ERR(iter);
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004231 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004232 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4233 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004234
4235 if (ret < 0)
4236 trace_array_put(tr);
4237
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004238 return ret;
4239}
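/*
 * Illustrative effect of the O_TRUNC handling above (shell; assumes
 * tracefs mounted at /sys/kernel/tracing):
 *
 *	echo > /sys/kernel/tracing/trace                 # clear all CPU buffers
 *	echo > /sys/kernel/tracing/per_cpu/cpu1/trace    # clear only CPU 1
 */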
4240
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004241/*
4242 * Some tracers are not suitable for instance buffers.
4243 * A tracer is always available for the global array (toplevel)
4244 * or if it explicitly states that it is.
4245 */
4246static bool
4247trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4248{
4249 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4250}
4251
4252/* Find the next tracer that this trace array may use */
4253static struct tracer *
4254get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4255{
4256 while (t && !trace_ok_for_array(t, tr))
4257 t = t->next;
4258
4259 return t;
4260}
4261
Ingo Molnare309b412008-05-12 21:20:51 +02004262static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004263t_next(struct seq_file *m, void *v, loff_t *pos)
4264{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004265 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004266 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004267
4268 (*pos)++;
4269
4270 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004271 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004272
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004273 return t;
4274}
4275
4276static void *t_start(struct seq_file *m, loff_t *pos)
4277{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004278 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004279 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004280 loff_t l = 0;
4281
4282 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004283
4284 t = get_tracer_for_array(tr, trace_types);
4285 for (; t && l < *pos; t = t_next(m, t, &l))
4286 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004287
4288 return t;
4289}
4290
4291static void t_stop(struct seq_file *m, void *p)
4292{
4293 mutex_unlock(&trace_types_lock);
4294}
4295
4296static int t_show(struct seq_file *m, void *v)
4297{
4298 struct tracer *t = v;
4299
4300 if (!t)
4301 return 0;
4302
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004303 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004304 if (t->next)
4305 seq_putc(m, ' ');
4306 else
4307 seq_putc(m, '\n');
4308
4309 return 0;
4310}
4311
James Morris88e9d342009-09-22 16:43:43 -07004312static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004313 .start = t_start,
4314 .next = t_next,
4315 .stop = t_stop,
4316 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004317};
4318
4319static int show_traces_open(struct inode *inode, struct file *file)
4320{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004321 struct trace_array *tr = inode->i_private;
4322 struct seq_file *m;
4323 int ret;
4324
Steven Rostedt60a11772008-05-12 21:20:44 +02004325 if (tracing_disabled)
4326 return -ENODEV;
4327
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004328 ret = seq_open(file, &show_traces_seq_ops);
4329 if (ret)
4330 return ret;
4331
4332 m = file->private_data;
4333 m->private = tr;
4334
4335 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004336}
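/*
 * The t_show() iteration above is what backs the available_tracers
 * file; a typical read looks like this (contents vary with the kernel
 * configuration):
 *
 *	# cat /sys/kernel/tracing/available_tracers
 *	blk function_graph wakeup function nop
 */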
4337
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004338static ssize_t
4339tracing_write_stub(struct file *filp, const char __user *ubuf,
4340 size_t count, loff_t *ppos)
4341{
4342 return count;
4343}
4344
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004345loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08004346{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004347 int ret;
4348
Slava Pestov364829b2010-11-24 15:13:16 -08004349 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004350 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08004351 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004352 file->f_pos = ret = 0;
4353
4354 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08004355}
4356
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004357static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004358 .open = tracing_open,
4359 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004360 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004361 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004362 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004363};
4364
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004365static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02004366 .open = show_traces_open,
4367 .read = seq_read,
4368 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004369 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004370};
4371
4372static ssize_t
4373tracing_cpumask_read(struct file *filp, char __user *ubuf,
4374 size_t count, loff_t *ppos)
4375{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004376 struct trace_array *tr = file_inode(filp)->i_private;
Changbin Du90e406f2017-11-30 11:39:43 +08004377 char *mask_str;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004378 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004379
Changbin Du90e406f2017-11-30 11:39:43 +08004380 len = snprintf(NULL, 0, "%*pb\n",
4381 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4382 mask_str = kmalloc(len, GFP_KERNEL);
4383 if (!mask_str)
4384 return -ENOMEM;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004385
Changbin Du90e406f2017-11-30 11:39:43 +08004386 len = snprintf(mask_str, len, "%*pb\n",
Tejun Heo1a402432015-02-13 14:37:39 -08004387 cpumask_pr_args(tr->tracing_cpumask));
4388 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004389 count = -EINVAL;
4390 goto out_err;
4391 }
Changbin Du90e406f2017-11-30 11:39:43 +08004392 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004393
4394out_err:
Changbin Du90e406f2017-11-30 11:39:43 +08004395 kfree(mask_str);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004396
4397 return count;
4398}
4399
4400static ssize_t
4401tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4402 size_t count, loff_t *ppos)
4403{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004404 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304405 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004406 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304407
4408 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4409 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004410
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304411 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004412 if (err)
4413 goto err_unlock;
4414
Steven Rostedta5e25882008-12-02 15:34:05 -05004415 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004416 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02004417 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004418 /*
4419 * Increase/decrease the disabled counter if we are
4420 * about to flip a bit in the cpumask:
4421 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004422 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304423 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004424 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4425 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004426 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004427 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304428 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004429 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4430 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004431 }
4432 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004433 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05004434 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02004435
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004436 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304437 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004438
Ingo Molnarc7078de2008-05-12 21:20:52 +02004439 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004440
4441err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08004442 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004443
4444 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004445}
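/*
 * Illustrative use of the cpumask interface above (shell; the value is
 * a hex cpumask, so "3" selects CPUs 0 and 1):
 *
 *	echo 3 > /sys/kernel/tracing/tracing_cpumask
 *	cat /sys/kernel/tracing/tracing_cpumask
 */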
4446
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004447static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004448 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004449 .read = tracing_cpumask_read,
4450 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004451 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004452 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004453};
4454
Li Zefanfdb372e2009-12-08 11:15:59 +08004455static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004456{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004457 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004458 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004459 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004460 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004461
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004462 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004463 tracer_flags = tr->current_trace->flags->val;
4464 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004465
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004466 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004467 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08004468 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004469 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004470 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004471 }
4472
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004473 for (i = 0; trace_opts[i].name; i++) {
4474 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08004475 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004476 else
Li Zefanfdb372e2009-12-08 11:15:59 +08004477 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004478 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05004479 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004480
Li Zefanfdb372e2009-12-08 11:15:59 +08004481 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004482}
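/*
 * Example of the listing produced above, one option per line with a
 * "no" prefix when the option is off (abridged; the set of options
 * varies by kernel):
 *
 *	# cat /sys/kernel/tracing/trace_options
 *	print-parent
 *	nosym-offset
 *	nosym-addr
 *	noverbose
 */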
4483
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004484static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08004485 struct tracer_flags *tracer_flags,
4486 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004487{
Chunyu Hud39cdd22016-03-08 21:37:01 +08004488 struct tracer *trace = tracer_flags->trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004489 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004490
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004491 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004492 if (ret)
4493 return ret;
4494
4495 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08004496 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004497 else
Zhaolei77708412009-08-07 18:53:21 +08004498 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004499 return 0;
4500}
4501
Li Zefan8d18eaa2009-12-08 11:17:06 +08004502/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004503static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08004504{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004505 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08004506 struct tracer_flags *tracer_flags = trace->flags;
4507 struct tracer_opt *opts = NULL;
4508 int i;
4509
4510 for (i = 0; tracer_flags->opts[i].name; i++) {
4511 opts = &tracer_flags->opts[i];
4512
4513 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004514 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08004515 }
4516
4517 return -EINVAL;
4518}
4519
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004520/* Some tracers require overwrite to stay enabled */
4521int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4522{
4523 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4524 return -1;
4525
4526 return 0;
4527}
4528
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004529int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004530{
4531 /* do nothing if flag is already set */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004532 if (!!(tr->trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004533 return 0;
4534
4535 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004536 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05004537 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004538 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004539
4540 if (enabled)
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004541 tr->trace_flags |= mask;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004542 else
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004543 tr->trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08004544
4545 if (mask == TRACE_ITER_RECORD_CMD)
4546 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08004547
Joel Fernandesd914ba32017-06-26 19:01:55 -07004548 if (mask == TRACE_ITER_RECORD_TGID) {
4549 if (!tgid_map)
Kees Cook6396bb22018-06-12 14:03:40 -07004550 tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
4551 sizeof(*tgid_map),
Joel Fernandesd914ba32017-06-26 19:01:55 -07004552 GFP_KERNEL);
4553 if (!tgid_map) {
4554 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4555 return -ENOMEM;
4556 }
4557
4558 trace_event_enable_tgid_record(enabled);
4559 }
4560
Steven Rostedtc37775d2016-04-13 16:59:18 -04004561 if (mask == TRACE_ITER_EVENT_FORK)
4562 trace_event_follow_fork(tr, enabled);
4563
Namhyung Kim1e104862017-04-17 11:44:28 +09004564 if (mask == TRACE_ITER_FUNC_FORK)
4565 ftrace_pid_follow_fork(tr, enabled);
4566
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004567 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004568 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004569#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004570 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04004571#endif
4572 }
Steven Rostedt81698832012-10-11 10:15:05 -04004573
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004574 if (mask == TRACE_ITER_PRINTK) {
Steven Rostedt81698832012-10-11 10:15:05 -04004575 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04004576 trace_printk_control(enabled);
4577 }
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004578
4579 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04004580}
4581
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004582static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004583{
Li Zefan8d18eaa2009-12-08 11:17:06 +08004584 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004585 int neg = 0;
Yisheng Xie591a0332018-05-17 16:36:03 +08004586 int ret;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004587 size_t orig_len = strlen(option);
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004588 int len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004589
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004590 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004591
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004592 len = str_has_prefix(cmp, "no");
4593 if (len)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004594 neg = 1;
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05004595
4596 cmp += len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004597
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004598 mutex_lock(&trace_types_lock);
4599
Yisheng Xie591a0332018-05-17 16:36:03 +08004600 ret = match_string(trace_options, -1, cmp);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01004601 /* If no option could be set, test the specific tracer options */
Yisheng Xie591a0332018-05-17 16:36:03 +08004602 if (ret < 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05004603 ret = set_tracer_option(tr, cmp, neg);
Yisheng Xie591a0332018-05-17 16:36:03 +08004604 else
4605 ret = set_tracer_flag(tr, 1 << ret, !neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04004606
4607 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004608
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004609 /*
4610 * If the first trailing whitespace is replaced with '\0' by strstrip,
4611 * turn it back into a space.
4612 */
4613 if (orig_len > strlen(option))
4614 option[strlen(option)] = ' ';
4615
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004616 return ret;
4617}
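/*
 * Illustrative writes handled by trace_set_options() (shell):
 *
 *	echo sym-offset > /sys/kernel/tracing/trace_options      # set a flag
 *	echo nosym-offset > /sys/kernel/tracing/trace_options    # clear it
 */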
4618
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004619static void __init apply_trace_boot_options(void)
4620{
4621 char *buf = trace_boot_options_buf;
4622 char *option;
4623
4624 while (true) {
4625 option = strsep(&buf, ",");
4626
4627 if (!option)
4628 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004629
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05004630 if (*option)
4631 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08004632
4633 /* Put back the comma to allow this to be called again */
4634 if (buf)
4635 *(buf - 1) = ',';
4636 }
4637}
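/*
 * The loop above consumes the comma separated trace_options= kernel
 * command line parameter, e.g. (illustrative):
 *
 *	trace_options=stacktrace,noirq-info
 */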
4638
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004639static ssize_t
4640tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4641 size_t cnt, loff_t *ppos)
4642{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004643 struct seq_file *m = filp->private_data;
4644 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004645 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004646 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004647
4648 if (cnt >= sizeof(buf))
4649 return -EINVAL;
4650
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08004651 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004652 return -EFAULT;
4653
Steven Rostedta8dd2172013-01-09 20:54:17 -05004654 buf[cnt] = 0;
4655
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004656 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004657 if (ret < 0)
4658 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04004659
Jiri Olsacf8517c2009-10-23 19:36:16 -04004660 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004661
4662 return cnt;
4663}
4664
Li Zefanfdb372e2009-12-08 11:15:59 +08004665static int tracing_trace_options_open(struct inode *inode, struct file *file)
4666{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004667 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004668 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004669
Li Zefanfdb372e2009-12-08 11:15:59 +08004670 if (tracing_disabled)
4671 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004672
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004673 if (trace_array_get(tr) < 0)
4674 return -ENODEV;
4675
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004676 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4677 if (ret < 0)
4678 trace_array_put(tr);
4679
4680 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08004681}
4682
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004683static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08004684 .open = tracing_trace_options_open,
4685 .read = seq_read,
4686 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004687 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05004688 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004689};
4690
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004691static const char readme_msg[] =
4692 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004693 "# echo 0 > tracing_on : quick way to disable tracing\n"
4694 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4695 " Important files:\n"
4696 " trace\t\t\t- The static contents of the buffer\n"
4697 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4698 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4699 " current_tracer\t- function and latency tracers\n"
4700 " available_tracers\t- list of configured tracers for current_tracer\n"
4701 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4702 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4703 " trace_clock\t\t-change the clock used to order events\n"
4704 " local: Per cpu clock but may not be synced across CPUs\n"
4705 " global: Synced across CPUs but slows tracing down.\n"
4706 " counter: Not a clock, but just an increment\n"
4707 " uptime: Jiffy counter from time of boot\n"
4708 " perf: Same clock that perf events use\n"
4709#ifdef CONFIG_X86_64
4710 " x86-tsc: TSC cycle counter\n"
4711#endif
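/*
 * Illustrative clock switch (shell); reading the file shows the
 * selected clock in brackets:
 *
 *	echo global > /sys/kernel/tracing/trace_clock
 *	cat /sys/kernel/tracing/trace_clock
 */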
Tom Zanussi2c1ea602018-01-15 20:51:41 -06004712 "\n timestamp_mode\t- view the mode used to timestamp events\n"
4713 " delta: Delta difference against a buffer-wide timestamp\n"
4714 " absolute: Absolute (standalone) timestamp\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004715 "\n trace_marker\t\t- Writes into this file are inserted into the kernel buffer\n"
Steven Rostedtfa32e852016-07-06 15:25:08 -04004716 "\n trace_marker_raw\t\t- Writes into this file are inserted as binary data into the kernel buffer\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004717 " tracing_cpumask\t- Limit which CPUs to trace\n"
4718 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4719 "\t\t\t Remove sub-buffer with rmdir\n"
4720 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004721 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4722 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004723 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004724#ifdef CONFIG_DYNAMIC_FTRACE
4725 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004726 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4727 "\t\t\t functions\n"
Masami Hiramatsu60f1d5e2016-10-05 20:58:15 +09004728 "\t accepts: func_full_name or glob-matching-pattern\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004729 "\t modules: Can select a group via module\n"
4730 "\t Format: :mod:<module-name>\n"
4731 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4732 "\t triggers: a command to perform when function is hit\n"
4733 "\t Format: <function>:<trigger>[:count]\n"
4734 "\t trigger: traceon, traceoff\n"
4735 "\t\t enable_event:<system>:<event>\n"
4736 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004737#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004738 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004739#endif
4740#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004741 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004742#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04004743 "\t\t dump\n"
4744 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004745 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4746 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4747 "\t The first one will disable tracing every time do_fault is hit\n"
4748 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4749 "\t The first time do trap is hit and it disables tracing, the\n"
4750 "\t counter will decrement to 2. If tracing is already disabled,\n"
4751 "\t the counter will not decrement. It only decrements when the\n"
4752 "\t trigger did work\n"
4753 "\t To remove trigger without count:\n"
4754 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4755 "\t To remove trigger with a count:\n"
4756 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004757 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004758 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4759 "\t modules: Can select a group via module command :mod:\n"
4760 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004761#endif /* CONFIG_DYNAMIC_FTRACE */
4762#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004763 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4764 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004765#endif
4766#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4767 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09004768 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004769 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4770#endif
4771#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004772 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4773 "\t\t\t snapshot buffer. Read the contents for more\n"
4774 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004775#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004776#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004777 " stack_trace\t\t- Shows the max stack trace when active\n"
4778 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004779 "\t\t\t Write into this file to reset the max size (trigger a\n"
4780 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004781#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004782 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4783 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04004784#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08004785#endif /* CONFIG_STACK_TRACER */
Masami Hiramatsu5448d442018-11-05 18:02:08 +09004786#ifdef CONFIG_DYNAMIC_EVENTS
4787 " dynamic_events\t\t- Add/remove/show the generic dynamic events\n"
4788 "\t\t\t Write into this file to define/undefine new trace events.\n"
4789#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004790#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsu86425622016-08-18 17:58:15 +09004791 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4792 "\t\t\t Write into this file to define/undefine new trace events.\n"
4793#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004794#ifdef CONFIG_UPROBE_EVENTS
Masami Hiramatsu86425622016-08-18 17:58:15 +09004795 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4796 "\t\t\t Write into this file to define/undefine new trace events.\n"
4797#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004798#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
Masami Hiramatsu86425622016-08-18 17:58:15 +09004799 "\t accepts: event-definitions (one definition per line)\n"
Masami Hiramatsuc3ca46e2017-05-23 15:05:50 +09004800 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4801 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
Masami Hiramatsu7bbab382018-11-05 18:03:33 +09004802#ifdef CONFIG_HIST_TRIGGERS
4803 "\t s:[synthetic/]<event> <field> [<field>]\n"
4804#endif
Masami Hiramatsu86425622016-08-18 17:58:15 +09004805 "\t -:[<group>/]<event>\n"
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004806#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsu86425622016-08-18 17:58:15 +09004807 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
Naveen N. Rao35b6f552017-02-22 19:23:39 +05304808 "\t place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09004809#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11004810#ifdef CONFIG_UPROBE_EVENTS
Ravi Bangoria1cc33162018-08-20 10:12:47 +05304811 "\t place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09004812#endif
4813 "\t args: <name>=fetcharg[:type]\n"
4814 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09004815#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
4816 "\t $stack<index>, $stack, $retval, $comm, $arg<N>\n"
4817#else
Masami Hiramatsu86425622016-08-18 17:58:15 +09004818 "\t $stack<index>, $stack, $retval, $comm\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09004819#endif
Masami Hiramatsu60c2e0c2018-04-25 21:20:28 +09004820 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
Masami Hiramatsu40b53b72018-04-25 21:21:55 +09004821 "\t b<bit-width>@<bit-offset>/<container-size>,\n"
4822 "\t <type>\\[<array-size>\\]\n"
Masami Hiramatsu7bbab382018-11-05 18:03:33 +09004823#ifdef CONFIG_HIST_TRIGGERS
4824 "\t field: <stype> <name>;\n"
4825 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
4826 "\t [unsigned] char/int/long\n"
4827#endif
Masami Hiramatsu86425622016-08-18 17:58:15 +09004828#endif
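/*
 * Illustrative kprobe event definition (shell; "myprobe" is an
 * arbitrary name, and $arg1 fetching assumes
 * CONFIG_HAVE_FUNCTION_ARG_ACCESS_API as noted above):
 *
 *	echo 'p:myprobe do_sys_open dfd=$arg1' >> /sys/kernel/tracing/kprobe_events
 *	echo 1 > /sys/kernel/tracing/events/kprobes/myprobe/enable
 */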
Tom Zanussi26f25562014-01-17 15:11:44 -06004829 " events/\t\t- Directory containing all trace event subsystems:\n"
4830 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4831 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004832 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4833 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004834 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004835 " events/<system>/<event>/\t- Directory containing control files for\n"
4836 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004837 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4838 " filter\t\t- If set, only events passing filter are traced\n"
4839 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004840 "\t Format: <trigger>[:count][if <filter>]\n"
4841 "\t trigger: traceon, traceoff\n"
4842 "\t enable_event:<system>:<event>\n"
4843 "\t disable_event:<system>:<event>\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06004844#ifdef CONFIG_HIST_TRIGGERS
4845 "\t enable_hist:<system>:<event>\n"
4846 "\t disable_hist:<system>:<event>\n"
4847#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06004848#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004849 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004850#endif
4851#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004852 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06004853#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004854#ifdef CONFIG_HIST_TRIGGERS
4855 "\t\t hist (see below)\n"
4856#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05004857 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4858 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4859 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4860 "\t events/block/block_unplug/trigger\n"
4861 "\t The first disables tracing every time block_unplug is hit.\n"
4862 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4863 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4864 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4865 "\t Like function triggers, the counter is only decremented if it\n"
4866 "\t enabled or disabled tracing.\n"
4867 "\t To remove a trigger without a count:\n"
4868 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4869 "\t To remove a trigger with a count:\n"
4870 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4871 "\t Filters can be ignored when removing a trigger.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004872#ifdef CONFIG_HIST_TRIGGERS
4873 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
Tom Zanussi76a3b0c2016-03-03 12:54:44 -06004874 "\t Format: hist:keys=<field1[,field2,...]>\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004875 "\t [:values=<field1[,field2,...]>]\n"
Tom Zanussie62347d2016-03-03 12:54:45 -06004876 "\t [:sort=<field1[,field2,...]>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004877 "\t [:size=#entries]\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06004878 "\t [:pause][:continue][:clear]\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004879 "\t [:name=histname1]\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06004880 "\t [:<handler>.<action>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004881 "\t [if <filter>]\n\n"
4882 "\t When a matching event is hit, an entry is added to a hash\n"
Tom Zanussif2606832016-03-03 12:54:43 -06004883 "\t table using the key(s) and value(s) named, and the value of a\n"
4884 "\t sum called 'hitcount' is incremented. Keys and values\n"
4885 "\t correspond to fields in the event's format description. Keys\n"
Tom Zanussi69a02002016-03-03 12:54:52 -06004886 "\t can be any field, or the special string 'stacktrace'.\n"
4887 "\t Compound keys consisting of up to two fields can be specified\n"
4888 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4889 "\t fields. Sort keys consisting of up to two fields can be\n"
4890 "\t specified using the 'sort' keyword. The sort direction can\n"
4891 "\t be modified by appending '.descending' or '.ascending' to a\n"
4892 "\t sort field. The 'size' parameter can be used to specify more\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004893 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4894 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4895 "\t its histogram data will be shared with other triggers of the\n"
4896 "\t same name, and trigger hits will update this common data.\n\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004897 "\t Reading the 'hist' file for the event will dump the hash\n"
Tom Zanussi52a7f162016-03-03 12:54:57 -06004898 "\t table in its entirety to stdout. If there are multiple hist\n"
4899 "\t triggers attached to an event, there will be a table for each\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06004900 "\t trigger in the output. The table displayed for a named\n"
4901 "\t trigger will be the same as any other instance having the\n"
4902 "\t same name. The default format used to display a given field\n"
4903 "\t can be modified by appending any of the following modifiers\n"
4904 "\t to the field name, as applicable:\n\n"
Tom Zanussic6afad42016-03-03 12:54:49 -06004905 "\t .hex display a number as a hex value\n"
4906 "\t .sym display an address as a symbol\n"
Tom Zanussi6b4827a2016-03-03 12:54:50 -06004907 "\t .sym-offset display an address as a symbol and offset\n"
Tom Zanussi31696192016-03-03 12:54:51 -06004908 "\t .execname display a common_pid as a program name\n"
Tom Zanussi860f9f62018-01-15 20:51:48 -06004909 "\t .syscall display a syscall id as a syscall name\n"
4910 "\t .log2 display log2 value rather than raw number\n"
4911 "\t .usecs display a common_timestamp in microseconds\n\n"
Tom Zanussi83e99912016-03-03 12:54:46 -06004912 "\t The 'pause' parameter can be used to pause an existing hist\n"
4913 "\t trigger or to start a hist trigger but not log any events\n"
4914 "\t until told to do so. 'continue' can be used to start or\n"
4915 "\t restart a paused hist trigger.\n\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06004916 "\t The 'clear' parameter will clear the contents of a running\n"
4917 "\t hist trigger and leave its current paused/active state\n"
4918 "\t unchanged.\n\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06004919 "\t The enable_hist and disable_hist triggers can be used to\n"
4920 "\t have one event conditionally start and stop another event's\n"
Colin Ian King9e5a36a2019-02-17 22:32:22 +00004921 "\t already-attached hist trigger. The syntax is analogous to\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06004922 "\t the enable_event and disable_event triggers.\n\n"
4923 "\t Hist trigger handlers and actions are executed whenever a\n"
4924 "\t a histogram entry is added or updated. They take the form:\n\n"
4925 "\t <handler>.<action>\n\n"
4926 "\t The available handlers are:\n\n"
4927 "\t onmatch(matching.event) - invoke on addition or update\n"
Tom Zanussidff81f52019-02-13 17:42:48 -06004928 "\t onmax(var) - invoke if var exceeds current max\n"
4929 "\t onchange(var) - invoke action if var changes\n\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06004930 "\t The available actions are:\n\n"
Tom Zanussie91eefd72019-02-13 17:42:50 -06004931 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06004932 "\t save(field,...) - save current event fields\n"
Tom Zanussia3785b72019-02-13 17:42:46 -06004933#ifdef CONFIG_TRACER_SNAPSHOT
4934 "\t snapshot() - snapshot the trace buffer\n"
4935#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06004936#endif
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004937;
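/*
 * A worked example tying the hist-trigger pieces above together. This is
 * an illustrative sketch, not text from readme_msg; the kmem:kmalloc event
 * and its bytes_req field are assumptions that hold on typical configs:
 *
 *   # cd /sys/kernel/tracing
 *   # echo 'hist:keys=common_pid.execname:values=bytes_req:size=4096' > \
 *         events/kmem/kmalloc/trigger
 *   # cat events/kmem/kmalloc/hist
 *
 * This aggregates kmalloc hits per requesting task, sums bytes_req for
 * each, and uses a 4096-entry table instead of the default 2048.
 */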
4938
4939static ssize_t
4940tracing_readme_read(struct file *filp, char __user *ubuf,
4941 size_t cnt, loff_t *ppos)
4942{
4943 return simple_read_from_buffer(ubuf, cnt, ppos,
4944 readme_msg, strlen(readme_msg));
4945}
4946
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004947static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02004948 .open = tracing_open_generic,
4949 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004950 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02004951};
4952
Michael Sartain99c621d2017-07-05 22:07:15 -06004953static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4954{
4955 int *ptr = v;
4956
4957 if (*pos || m->count)
4958 ptr++;
4959
4960 (*pos)++;
4961
4962 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4963 if (trace_find_tgid(*ptr))
4964 return ptr;
4965 }
4966
4967 return NULL;
4968}
4969
4970static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
4971{
4972 void *v;
4973 loff_t l = 0;
4974
4975 if (!tgid_map)
4976 return NULL;
4977
4978 v = &tgid_map[0];
4979 while (l <= *pos) {
4980 v = saved_tgids_next(m, v, &l);
4981 if (!v)
4982 return NULL;
4983 }
4984
4985 return v;
4986}
4987
4988static void saved_tgids_stop(struct seq_file *m, void *v)
4989{
4990}
4991
4992static int saved_tgids_show(struct seq_file *m, void *v)
4993{
4994 int pid = (int *)v - tgid_map;
4995
4996 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
4997 return 0;
4998}
4999
5000static const struct seq_operations tracing_saved_tgids_seq_ops = {
5001 .start = saved_tgids_start,
5002 .stop = saved_tgids_stop,
5003 .next = saved_tgids_next,
5004 .show = saved_tgids_show,
5005};
5006
5007static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5008{
5009 if (tracing_disabled)
5010 return -ENODEV;
5011
5012 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5013}
5014
5015
5016static const struct file_operations tracing_saved_tgids_fops = {
5017 .open = tracing_saved_tgids_open,
5018 .read = seq_read,
5019 .llseek = seq_lseek,
5020 .release = seq_release,
5021};
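/*
 * Minimal userspace sketch (not part of this file) of consuming the
 * saved_tgids interface implemented above. Assumes tracefs is mounted at
 * /sys/kernel/tracing and that the record-tgid trace option has been
 * enabled so tgid_map has been populated.
 */
#if 0	/* illustrative only; build as a normal userspace program */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/saved_tgids", "r");
	int pid, tgid;

	if (!f) {
		perror("saved_tgids");
		return 1;
	}
	/* Each line is "<pid> <tgid>", as printed by saved_tgids_show() */
	while (fscanf(f, "%d %d", &pid, &tgid) == 2)
		printf("pid %d -> tgid %d\n", pid, tgid);
	fclose(f);
	return 0;
}
#endif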
5022
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005023static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04005024{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005025 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005026
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005027 if (*pos || m->count)
5028 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005029
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005030 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005031
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005032 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5033 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005034 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04005035 continue;
5036
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005037 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005038 }
5039
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005040 return NULL;
5041}
Avadh Patel69abe6a2009-04-10 16:04:48 -04005042
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005043static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5044{
5045 void *v;
5046 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005047
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005048 preempt_disable();
5049 arch_spin_lock(&trace_cmdline_lock);
5050
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005051 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005052 while (l <= *pos) {
5053 v = saved_cmdlines_next(m, v, &l);
5054 if (!v)
5055 return NULL;
5056 }
5057
5058 return v;
5059}
5060
5061static void saved_cmdlines_stop(struct seq_file *m, void *v)
5062{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005063 arch_spin_unlock(&trace_cmdline_lock);
5064 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005065}
5066
5067static int saved_cmdlines_show(struct seq_file *m, void *v)
5068{
5069 char buf[TASK_COMM_LEN];
5070 unsigned int *pid = v;
5071
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005072 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005073 seq_printf(m, "%d %s\n", *pid, buf);
5074 return 0;
5075}
5076
5077static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5078 .start = saved_cmdlines_start,
5079 .next = saved_cmdlines_next,
5080 .stop = saved_cmdlines_stop,
5081 .show = saved_cmdlines_show,
5082};
5083
5084static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5085{
5086 if (tracing_disabled)
5087 return -ENODEV;
5088
5089 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04005090}
5091
5092static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005093 .open = tracing_saved_cmdlines_open,
5094 .read = seq_read,
5095 .llseek = seq_lseek,
5096 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04005097};
5098
5099static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005100tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5101 size_t cnt, loff_t *ppos)
5102{
5103 char buf[64];
5104 int r;
5105
5106 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09005107 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005108 arch_spin_unlock(&trace_cmdline_lock);
5109
5110 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5111}
5112
5113static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5114{
5115 kfree(s->saved_cmdlines);
5116 kfree(s->map_cmdline_to_pid);
5117 kfree(s);
5118}
5119
5120static int tracing_resize_saved_cmdlines(unsigned int val)
5121{
5122 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5123
Namhyung Kima6af8fb2014-06-10 16:11:35 +09005124 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005125 if (!s)
5126 return -ENOMEM;
5127
5128 if (allocate_cmdlines_buffer(val, s) < 0) {
5129 kfree(s);
5130 return -ENOMEM;
5131 }
5132
5133 arch_spin_lock(&trace_cmdline_lock);
5134 savedcmd_temp = savedcmd;
5135 savedcmd = s;
5136 arch_spin_unlock(&trace_cmdline_lock);
5137 free_saved_cmdlines_buffer(savedcmd_temp);
5138
5139 return 0;
5140}
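/*
 * Note the ordering above: the new buffer is published under
 * trace_cmdline_lock so readers never see a half-initialized savedcmd,
 * but the old buffer is freed only after the lock is dropped, keeping
 * the raw-spinlock critical section free of kfree() work.
 */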
5141
5142static ssize_t
5143tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5144 size_t cnt, loff_t *ppos)
5145{
5146 unsigned long val;
5147 int ret;
5148
5149 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5150 if (ret)
5151 return ret;
5152
5153	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5154 if (!val || val > PID_MAX_DEFAULT)
5155 return -EINVAL;
5156
5157 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5158 if (ret < 0)
5159 return ret;
5160
5161 *ppos += cnt;
5162
5163 return cnt;
5164}
5165
5166static const struct file_operations tracing_saved_cmdlines_size_fops = {
5167 .open = tracing_open_generic,
5168 .read = tracing_saved_cmdlines_size_read,
5169 .write = tracing_saved_cmdlines_size_write,
5170};
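/*
 * Userspace sketch (illustrative, not part of this file) of resizing the
 * cmdline cache through the file implemented above. The mount point is an
 * assumption; older systems use /sys/kernel/debug/tracing instead.
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/saved_cmdlines_size", "w");

	if (!f) {
		perror("saved_cmdlines_size");
		return 1;
	}
	/* Accepted range is 1..PID_MAX_DEFAULT, see the write handler above */
	fprintf(f, "4096\n");
	fclose(f);
	return 0;
}
#endif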
5171
Jeremy Linton681bec02017-05-31 16:56:53 -05005172#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005173static union trace_eval_map_item *
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005174update_eval_map(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005175{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005176 if (!ptr->map.eval_string) {
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005177 if (ptr->tail.next) {
5178 ptr = ptr->tail.next;
5179 /* Set ptr to the next real item (skip head) */
5180 ptr++;
5181 } else
5182 return NULL;
5183 }
5184 return ptr;
5185}
5186
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005187static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005188{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005189 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005190
5191 /*
5192 * Paranoid! If ptr points to end, we don't want to increment past it.
5193 * This really should never happen.
5194 */
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005195 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005196 if (WARN_ON_ONCE(!ptr))
5197 return NULL;
5198
5199 ptr++;
5200
5201 (*pos)++;
5202
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005203 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005204
5205 return ptr;
5206}
5207
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005208static void *eval_map_start(struct seq_file *m, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005209{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005210 union trace_eval_map_item *v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005211 loff_t l = 0;
5212
Jeremy Linton1793ed92017-05-31 16:56:46 -05005213 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005214
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005215 v = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005216 if (v)
5217 v++;
5218
5219 while (v && l < *pos) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005220 v = eval_map_next(m, v, &l);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005221 }
5222
5223 return v;
5224}
5225
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005226static void eval_map_stop(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005227{
Jeremy Linton1793ed92017-05-31 16:56:46 -05005228 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005229}
5230
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005231static int eval_map_show(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005232{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005233 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005234
5235 seq_printf(m, "%s %ld (%s)\n",
Jeremy Linton00f4b652017-05-31 16:56:43 -05005236 ptr->map.eval_string, ptr->map.eval_value,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005237 ptr->map.system);
5238
5239 return 0;
5240}
5241
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005242static const struct seq_operations tracing_eval_map_seq_ops = {
5243 .start = eval_map_start,
5244 .next = eval_map_next,
5245 .stop = eval_map_stop,
5246 .show = eval_map_show,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005247};
5248
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005249static int tracing_eval_map_open(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005250{
5251 if (tracing_disabled)
5252 return -ENODEV;
5253
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005254 return seq_open(filp, &tracing_eval_map_seq_ops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005255}
5256
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005257static const struct file_operations tracing_eval_map_fops = {
5258 .open = tracing_eval_map_open,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005259 .read = seq_read,
5260 .llseek = seq_lseek,
5261 .release = seq_release,
5262};
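/*
 * Illustrative userspace sketch of parsing the eval_map output format
 * produced by eval_map_show() above ("<string> <value> (<system>)").
 * Requires CONFIG_TRACE_EVAL_MAP_FILE; the path is an assumption.
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/eval_map", "r");
	char name[128], system[128];
	long val;

	if (!f) {
		perror("eval_map");
		return 1;
	}
	while (fscanf(f, "%127s %ld (%127[^)])\n", name, &val, system) == 3)
		printf("%s.%s = %ld\n", system, name, val);
	fclose(f);
	return 0;
}
#endif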
5263
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005264static inline union trace_eval_map_item *
Jeremy Linton5f60b352017-05-31 16:56:47 -05005265trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005266{
5267 /* Return tail of array given the head */
5268 return ptr + ptr->head.length + 1;
5269}
5270
5271static void
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005272trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005273 int len)
5274{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005275 struct trace_eval_map **stop;
5276 struct trace_eval_map **map;
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005277 union trace_eval_map_item *map_array;
5278 union trace_eval_map_item *ptr;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005279
5280 stop = start + len;
5281
5282 /*
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005283 * The trace_eval_maps contains the map plus a head and tail item,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005284 * where the head holds the module and length of array, and the
5285 * tail holds a pointer to the next list.
5286 */
Kees Cook6da2ec52018-06-12 13:55:00 -07005287 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005288 if (!map_array) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005289 pr_warn("Unable to allocate trace eval mapping\n");
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005290 return;
5291 }
5292
Jeremy Linton1793ed92017-05-31 16:56:46 -05005293 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005294
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005295 if (!trace_eval_maps)
5296 trace_eval_maps = map_array;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005297 else {
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005298 ptr = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005299 for (;;) {
Jeremy Linton5f60b352017-05-31 16:56:47 -05005300 ptr = trace_eval_jmp_to_tail(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005301 if (!ptr->tail.next)
5302 break;
5303 ptr = ptr->tail.next;
5304
5305 }
5306 ptr->tail.next = map_array;
5307 }
5308 map_array->head.mod = mod;
5309 map_array->head.length = len;
5310 map_array++;
5311
5312 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5313 map_array->map = **map;
5314 map_array++;
5315 }
5316 memset(map_array, 0, sizeof(*map_array));
5317
Jeremy Linton1793ed92017-05-31 16:56:46 -05005318 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005319}
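/*
 * Resulting layout of one module's block in the trace_eval_maps chain
 * (descriptive sketch of the code above):
 *
 *   map_array[0]         head: { .mod = mod, .length = len }
 *   map_array[1..len]    map:  copies of the module's trace_eval_map entries
 *   map_array[len + 1]   tail: zeroed; tail.next links the next block
 *
 * trace_eval_jmp_to_tail() relies on this: head + length + 1 lands
 * exactly on the tail item.
 */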
5320
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005321static void trace_create_eval_file(struct dentry *d_tracer)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005322{
Jeremy Linton681bec02017-05-31 16:56:53 -05005323 trace_create_file("eval_map", 0444, d_tracer,
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005324 NULL, &tracing_eval_map_fops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005325}
5326
Jeremy Linton681bec02017-05-31 16:56:53 -05005327#else /* CONFIG_TRACE_EVAL_MAP_FILE */
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005328static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5329static inline void trace_insert_eval_map_file(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05005330 struct trace_eval_map **start, int len) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05005331#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005332
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005333static void trace_insert_eval_map(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05005334 struct trace_eval_map **start, int len)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005335{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005336 struct trace_eval_map **map;
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005337
5338 if (len <= 0)
5339 return;
5340
5341 map = start;
5342
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005343 trace_event_eval_update(map, len);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005344
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005345 trace_insert_eval_map_file(mod, start, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005346}
5347
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005348static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005349tracing_set_trace_read(struct file *filp, char __user *ubuf,
5350 size_t cnt, loff_t *ppos)
5351{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005352 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08005353 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005354 int r;
5355
5356 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005357 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005358 mutex_unlock(&trace_types_lock);
5359
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005360 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005361}
5362
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005363int tracer_init(struct tracer *t, struct trace_array *tr)
5364{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005365 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005366 return t->init(tr);
5367}
5368
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005369static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005370{
5371 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005372
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005373 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005374 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005375}
5376
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005377#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09005378/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005379static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5380 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09005381{
5382 int cpu, ret = 0;
5383
5384 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5385 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005386 ret = ring_buffer_resize(trace_buf->buffer,
5387 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005388 if (ret < 0)
5389 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005390 per_cpu_ptr(trace_buf->data, cpu)->entries =
5391 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09005392 }
5393 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005394 ret = ring_buffer_resize(trace_buf->buffer,
5395 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005396 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005397 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5398 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09005399 }
5400
5401 return ret;
5402}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005403#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09005404
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005405static int __tracing_resize_ring_buffer(struct trace_array *tr,
5406 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04005407{
5408 int ret;
5409
5410 /*
5411 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04005412 * we use the size that was given, and we can forget about
5413 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04005414 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05005415 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04005416
Steven Rostedtb382ede62012-10-10 21:44:34 -04005417 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005418 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04005419 return 0;
5420
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005421 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005422 if (ret < 0)
5423 return ret;
5424
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005425#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005426 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5427 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005428 goto out;
5429
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005430 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005431 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005432 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5433 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04005434 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04005435 /*
5436 * AARGH! We are left with different
5437 * size max buffer!!!!
5438 * The max buffer is our "snapshot" buffer.
5439 * When a tracer needs a snapshot (one of the
5440 * latency tracers), it swaps the max buffer
5441	 * with the saved snapshot. We succeeded in
5442	 * updating the size of the main buffer, but failed to
5443 * update the size of the max buffer. But when we tried
5444 * to reset the main buffer to the original size, we
5445 * failed there too. This is very unlikely to
5446 * happen, but if it does, warn and kill all
5447 * tracing.
5448 */
Steven Rostedt73c51622009-03-11 13:42:01 -04005449 WARN_ON(1);
5450 tracing_disabled = 1;
5451 }
5452 return ret;
5453 }
5454
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005455 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005456 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005457 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005458 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005459
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005460 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005461#endif /* CONFIG_TRACER_MAX_TRACE */
5462
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005463 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005464 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005465 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005466 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04005467
5468 return ret;
5469}
5470
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005471static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5472 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005473{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07005474 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005475
5476 mutex_lock(&trace_types_lock);
5477
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005478 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5479 /* make sure, this cpu is enabled in the mask */
5480 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5481 ret = -EINVAL;
5482 goto out;
5483 }
5484 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005485
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005486 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005487 if (ret < 0)
5488 ret = -ENOMEM;
5489
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005490out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005491 mutex_unlock(&trace_types_lock);
5492
5493 return ret;
5494}
5495
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005496
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005497/**
5498 * tracing_update_buffers - used by tracing facility to expand ring buffers
5499 *
5500 * To save memory when tracing is never used on a system that has it
5501 * configured in, the ring buffers start at a minimum size. Once a
5502 * user starts to use the tracing facility, they need to grow to
5503 * their default size.
5504 *
5505 * This function is to be called when a tracer is about to be used.
5506 */
5507int tracing_update_buffers(void)
5508{
5509 int ret = 0;
5510
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005511 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005512 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005513 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005514 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005515 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04005516
5517 return ret;
5518}
5519
Steven Rostedt577b7852009-02-26 23:43:05 -05005520struct trace_option_dentry;
5521
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04005522static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005523create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05005524
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05005525/*
5526 * Used to clear out the tracer before deletion of an instance.
5527 * Must have trace_types_lock held.
5528 */
5529static void tracing_set_nop(struct trace_array *tr)
5530{
5531 if (tr->current_trace == &nop_trace)
5532 return;
5533
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005534 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05005535
5536 if (tr->current_trace->reset)
5537 tr->current_trace->reset(tr);
5538
5539 tr->current_trace = &nop_trace;
5540}
5541
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04005542static void add_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005543{
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005544 /* Only enable if the directory has been created already. */
5545 if (!tr->dir)
5546 return;
5547
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04005548 create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05005549}
5550
5551static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5552{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005553 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005554#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005555 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005556#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005557 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005558
Steven Rostedt1027fcb2009-03-12 11:33:20 -04005559 mutex_lock(&trace_types_lock);
5560
Steven Rostedt73c51622009-03-11 13:42:01 -04005561 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005562 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005563 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04005564 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01005565 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04005566 ret = 0;
5567 }
5568
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005569 for (t = trace_types; t; t = t->next) {
5570 if (strcmp(t->name, buf) == 0)
5571 break;
5572 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005573 if (!t) {
5574 ret = -EINVAL;
5575 goto out;
5576 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005577 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005578 goto out;
5579
Tom Zanussia35873a2019-02-13 17:42:45 -06005580#ifdef CONFIG_TRACER_SNAPSHOT
5581 if (t->use_max_tr) {
5582 arch_spin_lock(&tr->max_lock);
5583 if (tr->cond_snapshot)
5584 ret = -EBUSY;
5585 arch_spin_unlock(&tr->max_lock);
5586 if (ret)
5587 goto out;
5588 }
5589#endif
Ziqian SUN (Zamir)c7b3ae02017-09-11 14:26:35 +08005590	/* Some tracers won't work when enabled from the kernel command line */
5591 if (system_state < SYSTEM_RUNNING && t->noboot) {
5592 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5593 t->name);
5594 goto out;
5595 }
5596
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005597 /* Some tracers are only allowed for the top level buffer */
5598 if (!trace_ok_for_array(t, tr)) {
5599 ret = -EINVAL;
5600 goto out;
5601 }
5602
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005603 /* If trace pipe files are being read, we can't change the tracer */
5604 if (tr->current_trace->ref) {
5605 ret = -EBUSY;
5606 goto out;
5607 }
5608
Steven Rostedt9f029e82008-11-12 15:24:24 -05005609 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005610
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005611 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005612
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005613 if (tr->current_trace->reset)
5614 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05005615
Paul E. McKenney74401722018-11-06 18:44:52 -08005616 /* Current trace needs to be nop_trace before synchronize_rcu */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005617 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05005618
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005619#ifdef CONFIG_TRACER_MAX_TRACE
5620 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05005621
5622 if (had_max_tr && !t->use_max_tr) {
5623 /*
5624 * We need to make sure that the update_max_tr sees that
5625 * current_trace changed to nop_trace to keep it from
5626 * swapping the buffers after we resize it.
5627		 * The update_max_tr is called with interrupts disabled,
5628		 * so a synchronize_rcu() is sufficient.
5629 */
Paul E. McKenney74401722018-11-06 18:44:52 -08005630 synchronize_rcu();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005631 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005632 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005633#endif
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005634
5635#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05005636 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04005637 ret = tracing_alloc_snapshot_instance(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09005638 if (ret < 0)
5639 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09005640 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005641#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05005642
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005643 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005644 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01005645 if (ret)
5646 goto out;
5647 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005648
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005649 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05005650 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05005651 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005652 out:
5653 mutex_unlock(&trace_types_lock);
5654
Peter Zijlstrad9e54072008-11-01 19:57:37 +01005655 return ret;
5656}
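/*
 * Userspace sketch (illustrative only) of the path exercised by
 * tracing_set_trace_write() below. "nop" is used because nop_trace is
 * always registered; the tracefs mount point is an assumption.
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/current_tracer", "w");
	char cur[64];

	if (!f) {
		perror("current_tracer");
		return 1;
	}
	/* A trailing newline is fine: the write handler strips whitespace */
	fputs("nop\n", f);
	fclose(f);

	f = fopen("/sys/kernel/tracing/current_tracer", "r");
	if (f) {
		if (fgets(cur, sizeof(cur), f))
			printf("current tracer: %s", cur);
		fclose(f);
	}
	return 0;
}
#endif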
5657
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005658static ssize_t
5659tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5660 size_t cnt, loff_t *ppos)
5661{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005662 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08005663 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005664 int i;
5665 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005666 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005667
Steven Rostedt60063a62008-10-28 10:44:24 -04005668 ret = cnt;
5669
Li Zefanee6c2c12009-09-18 14:06:47 +08005670 if (cnt > MAX_TRACER_SIZE)
5671 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005672
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08005673 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005674 return -EFAULT;
5675
5676 buf[cnt] = 0;
5677
5678	/* strip trailing whitespace */
5679 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5680 buf[i] = 0;
5681
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005682 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01005683 if (err)
5684 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005685
Jiri Olsacf8517c2009-10-23 19:36:16 -04005686 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005687
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02005688 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005689}
5690
5691static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005692tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5693 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005694{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005695 char buf[64];
5696 int r;
5697
Steven Rostedtcffae432008-05-12 21:21:00 +02005698 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005699 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02005700 if (r > sizeof(buf))
5701 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005702 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005703}
5704
5705static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005706tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5707 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005708{
Hannes Eder5e398412009-02-10 19:44:34 +01005709 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005710 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005711
Peter Huewe22fe9b52011-06-07 21:58:27 +02005712 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5713 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005714 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005715
5716 *ptr = val * 1000;
5717
5718 return cnt;
5719}
5720
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005721static ssize_t
5722tracing_thresh_read(struct file *filp, char __user *ubuf,
5723 size_t cnt, loff_t *ppos)
5724{
5725 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5726}
5727
5728static ssize_t
5729tracing_thresh_write(struct file *filp, const char __user *ubuf,
5730 size_t cnt, loff_t *ppos)
5731{
5732 struct trace_array *tr = filp->private_data;
5733 int ret;
5734
5735 mutex_lock(&trace_types_lock);
5736 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5737 if (ret < 0)
5738 goto out;
5739
5740 if (tr->current_trace->update_thresh) {
5741 ret = tr->current_trace->update_thresh(tr);
5742 if (ret < 0)
5743 goto out;
5744 }
5745
5746 ret = cnt;
5747out:
5748 mutex_unlock(&trace_types_lock);
5749
5750 return ret;
5751}
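/*
 * Userspace sketch (illustrative only): tracing_thresh is written in
 * microseconds and stored in nanoseconds (tracing_nsecs_write() above
 * multiplies by 1000). The mount point is an assumption.
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/tracing_thresh", "w");

	if (!f) {
		perror("tracing_thresh");
		return 1;
	}
	fprintf(f, "100\n");	/* report only latencies above 100 usecs */
	fclose(f);
	return 0;
}
#endif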
5752
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04005753#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Chen Gange428abb2015-11-10 05:15:15 +08005754
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005755static ssize_t
5756tracing_max_lat_read(struct file *filp, char __user *ubuf,
5757 size_t cnt, loff_t *ppos)
5758{
5759 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5760}
5761
5762static ssize_t
5763tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5764 size_t cnt, loff_t *ppos)
5765{
5766 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5767}
5768
Chen Gange428abb2015-11-10 05:15:15 +08005769#endif
5770
Steven Rostedtb3806b42008-05-12 21:20:46 +02005771static int tracing_open_pipe(struct inode *inode, struct file *filp)
5772{
Oleg Nesterov15544202013-07-23 17:25:57 +02005773 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005774 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005775 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005776
5777 if (tracing_disabled)
5778 return -ENODEV;
5779
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005780 if (trace_array_get(tr) < 0)
5781 return -ENODEV;
5782
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005783 mutex_lock(&trace_types_lock);
5784
Steven Rostedtb3806b42008-05-12 21:20:46 +02005785 /* create a buffer to store the information to pass to userspace */
5786 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005787 if (!iter) {
5788 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005789 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005790 goto out;
5791 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005792
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04005793 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005794 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005795
5796 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5797 ret = -ENOMEM;
5798 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10305799 }
5800
Steven Rostedta3097202008-11-07 22:36:02 -05005801 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10305802 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05005803
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005804 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04005805 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5806
David Sharp8be07092012-11-13 12:18:22 -08005807 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005808 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08005809 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5810
Oleg Nesterov15544202013-07-23 17:25:57 +02005811 iter->tr = tr;
5812 iter->trace_buffer = &tr->trace_buffer;
5813 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005814 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005815 filp->private_data = iter;
5816
Steven Rostedt107bad82008-05-12 21:21:01 +02005817 if (iter->trace->pipe_open)
5818 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02005819
Arnd Bergmannb4447862010-07-07 23:40:11 +02005820 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005821
5822 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005823out:
5824 mutex_unlock(&trace_types_lock);
5825 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005826
5827fail:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005828 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005829 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005830 mutex_unlock(&trace_types_lock);
5831 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005832}
5833
5834static int tracing_release_pipe(struct inode *inode, struct file *file)
5835{
5836 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02005837 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005838
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005839 mutex_lock(&trace_types_lock);
5840
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005841 tr->current_trace->ref--;
5842
Steven Rostedt29bf4a52009-12-09 12:37:43 -05005843 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05005844 iter->trace->pipe_close(iter);
5845
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005846 mutex_unlock(&trace_types_lock);
5847
Rusty Russell44623442009-01-01 10:12:23 +10305848 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005849 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005850 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005851
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005852 trace_array_put(tr);
5853
Steven Rostedtb3806b42008-05-12 21:20:46 +02005854 return 0;
5855}
5856
Al Viro9dd95742017-07-03 00:42:43 -04005857static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005858trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005859{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005860 struct trace_array *tr = iter->tr;
5861
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005862 /* Iterators are static, they should be filled or empty */
5863 if (trace_buffer_iter(iter, iter->cpu_file))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08005864 return EPOLLIN | EPOLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005865
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005866 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005867 /*
5868 * Always select as readable when in blocking mode
5869 */
Linus Torvaldsa9a08842018-02-11 14:34:03 -08005870 return EPOLLIN | EPOLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005871 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005872 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05005873 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005874}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005875
Al Viro9dd95742017-07-03 00:42:43 -04005876static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005877tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5878{
5879 struct trace_iterator *iter = filp->private_data;
5880
5881 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005882}
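/*
 * Userspace sketch (illustrative only) of waiting on trace_pipe with
 * poll(2), which ends up in tracing_poll_pipe() above. The tracefs
 * mount point is an assumption.
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY | O_NONBLOCK);
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	char buf[4096];
	ssize_t n;

	if (fd < 0) {
		perror("trace_pipe");
		return 1;
	}
	/* POLLIN is reported once the ring buffer has data to consume */
	if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN)) {
		n = read(fd, buf, sizeof(buf));
		if (n > 0)
			fwrite(buf, 1, (size_t)n, stdout);
	}
	close(fd);
	return 0;
}
#endif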
5883
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005884/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005885static int tracing_wait_pipe(struct file *filp)
5886{
5887 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005888 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005889
5890 while (trace_empty(iter)) {
5891
5892		if (filp->f_flags & O_NONBLOCK)
5893			return -EAGAIN;
5894
5895
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005896 /*
Liu Bo250bfd32013-01-14 10:54:11 +08005897 * We block until we read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005898 * We still block if tracing is disabled, but we have never
5899 * read anything. This allows a user to cat this file, and
5900 * then enable tracing. But after we have read something,
5901 * we give an EOF when tracing is again disabled.
5902 *
5903 * iter->pos will be 0 if we haven't read anything.
5904 */
Tahsin Erdogan75df6e62017-09-17 03:23:48 -07005905 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005906 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04005907
5908 mutex_unlock(&iter->mutex);
5909
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05005910 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04005911
5912 mutex_lock(&iter->mutex);
5913
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005914 if (ret)
5915 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005916 }
5917
5918 return 1;
5919}
5920
Steven Rostedtb3806b42008-05-12 21:20:46 +02005921/*
5922 * Consumer reader.
5923 */
5924static ssize_t
5925tracing_read_pipe(struct file *filp, char __user *ubuf,
5926 size_t cnt, loff_t *ppos)
5927{
5928 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005929 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005930
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005931 /*
5932 * Avoid more than one consumer on a single file descriptor
5933 * This is just a matter of traces coherency, the ring buffer itself
5934 * is protected.
5935 */
5936 mutex_lock(&iter->mutex);
Steven Rostedt (Red Hat)12458002016-09-23 22:57:13 -04005937
5938 /* return any leftover data */
5939 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5940 if (sret != -EBUSY)
5941 goto out;
5942
5943 trace_seq_init(&iter->seq);
5944
Steven Rostedt107bad82008-05-12 21:21:01 +02005945 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005946 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5947 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02005948 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02005949 }
5950
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02005951waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005952 sret = tracing_wait_pipe(filp);
5953 if (sret <= 0)
5954 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005955
5956 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005957 if (trace_empty(iter)) {
5958 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02005959 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02005960 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02005961
5962 if (cnt >= PAGE_SIZE)
5963 cnt = PAGE_SIZE - 1;
5964
Steven Rostedt53d0aa72008-05-12 21:21:01 +02005965 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02005966 memset(&iter->seq, 0,
5967 sizeof(struct trace_iterator) -
5968 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04005969 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02005970 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005971
Lai Jiangshan4f535962009-05-18 19:35:34 +08005972 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005973 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05005974 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02005975 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005976 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02005977
Ingo Molnarf9896bf2008-05-12 21:20:47 +02005978 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02005979 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02005980 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005981 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005982 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02005983 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01005984 if (ret != TRACE_TYPE_NO_CONSUME)
5985 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005986
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005987 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02005988 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01005989
5990 /*
5991 * Setting the full flag means we reached the trace_seq buffer
5992 * size and we should leave by partial output condition above.
5993 * One of the trace_seq_* functions is not used properly.
5994 */
5995 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5996 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02005997 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005998 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005999 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02006000
Steven Rostedtb3806b42008-05-12 21:20:46 +02006001 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006002 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006003 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05006004 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006005
6006 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006007 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006008 * entries, go back to wait for more entries.
6009 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006010 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006011 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006012
Steven Rostedt107bad82008-05-12 21:21:01 +02006013out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006014 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02006015
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006016 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006017}
6018
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006019static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6020 unsigned int idx)
6021{
6022 __free_page(spd->pages[idx]);
6023}
6024
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08006025static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05006026 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006027 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05006028 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006029 .steal = generic_pipe_buf_steal,
6030 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006031};
6032
Steven Rostedt34cd4992009-02-09 12:06:29 -05006033static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006034tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006035{
6036 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006037 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006038 int ret;
6039
6040 /* Seq buffer is page-sized, exactly what we need. */
6041 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006042 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006043 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006044
6045 if (trace_seq_has_overflowed(&iter->seq)) {
6046 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006047 break;
6048 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006049
6050 /*
6051 * This should not be hit, because it should only
6052 * be set if the iter->seq overflowed. But check it
6053 * anyway to be safe.
6054 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05006055 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006056 iter->seq.seq.len = save_len;
6057 break;
6058 }
6059
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006060 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006061 if (rem < count) {
6062 rem = 0;
6063 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006064 break;
6065 }
6066
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08006067 if (ret != TRACE_TYPE_NO_CONSUME)
6068 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05006069 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05006070 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05006071 rem = 0;
6072 iter->ent = NULL;
6073 break;
6074 }
6075 }
6076
6077 return rem;
6078}
6079
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	if (i)
		ret = splice_to_pipe(pipe, &spd);
	else
		ret = 0;
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}
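
/*
 * Usage sketch (not kernel code, added for illustration): trace_pipe
 * supports splice_read, so a reader can move rendered trace text into a
 * pipe without bouncing it through a user buffer. The path assumes
 * tracefs is mounted at /sys/kernel/tracing (older setups use
 * /sys/kernel/debug/tracing):
 *
 *	int tfd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *	int pfd[2];
 *
 *	pipe(pfd);
 *	// Move up to 64K of formatted trace data into the pipe.
 *	splice(tfd, NULL, pfd[1], NULL, 65536, 0);
 */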

static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
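
/*
 * Shell sketch (tracefs path assumed to be /sys/kernel/tracing): the
 * buffer_size_kb file reads and writes the per-CPU buffer size in
 * kilobytes:
 *
 *	# echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *	# cat /sys/kernel/tracing/buffer_size_kb
 *	4096
 *
 * A single CPU can be resized through per_cpu/cpuN/buffer_size_kb, in
 * which case reading the top-level file prints "X" (sizes differ).
 */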

static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
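
/*
 * Companion read-only file: buffer_total_size_kb reports the sum of all
 * per-CPU buffer sizes. Shell sketch (tracefs path assumed):
 *
 *	# cat /sys/kernel/tracing/buffer_total_size_kb
 *	16384
 */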

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written; this
	 * handler exists only so that "echo > free_buffer" does not fail.
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* Disable tracing? */
	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}
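
/*
 * Shell sketch of the free_buffer file (tracefs path assumed): the work
 * happens on release, which shrinks the ring buffer to zero, and with
 * the stop-on-free option set it also disables tracing first:
 *
 *	# echo > /sys/kernel/tracing/free_buffer
 */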

static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	enum event_trigger_type tt = ETT_NONE;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	const char faulted[] = "<faulted>";
	ssize_t written;
	int size;
	int len;

/* Used in tracing_mark_raw_write() as well */
#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */

	/* If less than "<faulted>", then make sure we can still add that */
	if (cnt < FAULTED_SIZE)
		size += FAULTED_SIZE - cnt;

	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    irq_flags, preempt_count());
	if (unlikely(!event))
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
	if (len) {
		memcpy(&entry->buf, faulted, FAULTED_SIZE);
		cnt = FAULTED_SIZE;
		written = -EFAULT;
	} else
		written = cnt;
	len = cnt;

	if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
		/* do not add \n before testing triggers, but add \0 */
		entry->buf[cnt] = '\0';
		tt = event_triggers_call(tr->trace_marker_file, entry, event);
	}

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	if (tt)
		event_triggers_post_call(tr->trace_marker_file, tt);

	if (written > 0)
		*fpos += written;

	return written;
}
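
/*
 * Shell sketch of the trace_marker file (tracefs path assumed): anything
 * written shows up in the trace as a print event, which makes it handy
 * for correlating user-space activity with kernel events:
 *
 *	# echo "hit the slow path" > /sys/kernel/tracing/trace_marker
 *	# grep tracing_mark_write /sys/kernel/tracing/trace
 */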

/* Limit it for now to 3K (including tag) */
#define RAW_DATA_MAX_SIZE (1024*3)

static ssize_t
tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
		       size_t cnt, loff_t *fpos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct raw_data_entry *entry;
	const char faulted[] = "<faulted>";
	unsigned long irq_flags;
	ssize_t written;
	int size;
	int len;

#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	/* The marker must at least have a tag id */
	if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt;
	if (cnt < FAULT_SIZE_ID)
		size += FAULT_SIZE_ID - cnt;

	buffer = tr->trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
					    irq_flags, preempt_count());
	if (!event)
		/* Ring buffer disabled, return as if not open for write */
		return -EBADF;

	entry = ring_buffer_event_data(event);

	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
	if (len) {
		entry->id = -1;
		memcpy(&entry->buf, faulted, FAULTED_SIZE);
		written = -EFAULT;
	} else
		written = cnt;

	__buffer_unlock_commit(buffer, event);

	if (written > 0)
		*fpos += written;

	return written;
}
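
/*
 * User-space sketch (not kernel code, added for illustration) for
 * trace_marker_raw: the payload must start with an int tag id, and the
 * rest is opaque binary data for tools to decode later. The path assumes
 * tracefs at /sys/kernel/tracing.
 *
 *	struct { int id; char data[8]; } rec = { .id = 42, .data = "payload" };
 *	int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *
 *	write(fd, &rec, sizeof(rec));	// must be at least sizeof(int) bytes
 */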

static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}
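
/*
 * Shell sketch (tracefs path assumed): reading trace_clock brackets the
 * active clock, and writing any listed name switches clocks, resetting
 * the buffers as noted above. The exact clock list varies by kernel and
 * architecture:
 *
 *	# cat /sys/kernel/tracing/trace_clock
 *	[local] global counter uptime perf mono mono_raw boot
 *	# echo mono > /sys/kernel/tracing/trace_clock
 */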

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;

	mutex_lock(&trace_types_lock);

	if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
		seq_puts(m, "delta [absolute]\n");
	else
		seq_puts(m, "[delta] absolute\n");

	mutex_unlock(&trace_types_lock);

	return 0;
}

static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (abs && tr->time_stamp_abs_ref++)
		goto out;

	if (!abs) {
		if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
			ret = -EINVAL;
			goto out;
		}

		if (--tr->time_stamp_abs_ref)
			goto out;
	}

	ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->max_buffer.buffer)
		ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
#endif
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
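
/*
 * Note: the timestamp_mode file itself is read-only; absolute timestamps
 * are switched on indirectly by users such as "hist" triggers calling
 * tracing_set_time_stamp_abs(). The reference count above lets several
 * such users coexist. Shell sketch (tracefs path assumed):
 *
 *	# cat /sys/kernel/tracing/timestamp_mode
 *	[delta] absolute
 */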

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		spare_cpu;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	arch_spin_lock(&tr->max_lock);
	if (tr->cond_snapshot)
		ret = -EBUSY;
	arch_spin_unlock(&tr->max_lock);
	if (ret)
		goto out;

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = tracing_alloc_snapshot_instance(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id(), NULL);
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
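
/*
 * Shell sketch of the snapshot file (tracefs path assumed), matching the
 * switch above: "1" allocates (if needed) and swaps in the spare buffer,
 * "0" frees it, and any other number just clears its contents:
 *
 *	# echo 1 > /sys/kernel/tracing/snapshot
 *	# cat /sys/kernel/tracing/snapshot
 *	# echo 0 > /sys/kernel/tracing/snapshot
 */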

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */


static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};
#endif

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_mark_raw_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_raw_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

static const struct file_operations trace_time_stamp_mode_fops = {
	.open		= tracing_time_stamp_mode_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr		= tr;
	info->iter.cpu_file	= tracing_get_cpu(inode);
	info->iter.trace	= tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare		= NULL;
	/* Force reading ring buffer for first read */
	info->read		= (unsigned int)-1;

	filp->private_data = info;

	tr->current_trace->ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static __poll_t
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret = 0;
	ssize_t size;

	if (!count)
		return 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (!info->spare) {
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
		if (IS_ERR(info->spare)) {
			ret = PTR_ERR(info->spare);
			info->spare = NULL;
		} else {
			info->spare_cpu = iter->cpu_file;
		}
	}
	if (!info->spare)
		return ret;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			ret = wait_on_pipe(iter, 0);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;

	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}
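
/*
 * Shell sketch (tracefs path assumed): trace_pipe_raw hands out raw
 * ring-buffer pages for one CPU, so reads come in binary page units and
 * are consumed by tools such as trace-cmd rather than by humans. Use a
 * block size equal to the system page size (typically 4096):
 *
 *	# dd if=/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw \
 *	     of=page.bin bs=4096 count=1
 */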

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->current_trace->ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer,
					   info->spare_cpu, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			cpu;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out while filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (IS_ERR(ref->page)) {
			ret = PTR_ERR(ref->page);
			ref->page = NULL;
			kfree(ref);
			break;
		}
		ref->cpu = iter->cpu_file;

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->cpu,
						   ref->page);
			kfree(ref);
			break;
		}

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (ret)
			goto out;

		ret = -EAGAIN;
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			goto out;

		ret = wait_on_pipe(iter, iter->tr->buffer_percent);
		if (ret)
			goto out;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);

	return ret;
}
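
/*
 * The splice path above is the zero-copy variant of the read path: each
 * ring-buffer page is handed to the pipe by reference (struct buffer_ref)
 * instead of being copied, which is why *ppos and len must be multiples
 * of PAGE_SIZE. Readers that cannot meet that requirement should fall
 * back to plain read() on trace_pipe_raw.
 */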

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}
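
/*
 * Shell sketch (tracefs path assumed; numbers are illustrative): each
 * per-CPU stats file reports the counters gathered above, e.g.:
 *
 *	# cat /sys/kernel/tracing/per_cpu/cpu0/stats
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 5016
 *	oldest event ts:   453.222174
 *	now ts:   453.222221
 *	dropped events: 0
 *	read events: 129
 */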

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64]; /* Not too big for a shallow stack */
	int r;

	r = scnprintf(buf, 63, "%ld", *p);
	buf[r++] = '\n';

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	tracing_snapshot_instance(tr);
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {

		if (*count <= 0)
			return;

		(*count)--;
	}

	tracing_snapshot_instance(tr);
}
7319
7320static int
7321ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7322 struct ftrace_probe_ops *ops, void *data)
7323{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007324 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007325 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007326
7327 seq_printf(m, "%ps:", (void *)ip);
7328
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01007329 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007330
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007331 if (mapper)
7332 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7333
7334 if (count)
7335 seq_printf(m, ":count=%ld\n", *count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007336 else
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007337 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007338
7339 return 0;
7340}
7341
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007342static int
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007343ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007344 unsigned long ip, void *init_data, void **data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007345{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007346 struct ftrace_func_mapper *mapper = *data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007347
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007348 if (!mapper) {
7349 mapper = allocate_ftrace_func_mapper();
7350 if (!mapper)
7351 return -ENOMEM;
7352 *data = mapper;
7353 }
7354
7355 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007356}
7357
7358static void
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04007359ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007360 unsigned long ip, void *data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007361{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04007362 struct ftrace_func_mapper *mapper = data;
7363
7364 if (!ip) {
7365 if (!mapper)
7366 return;
7367 free_ftrace_func_mapper(mapper, NULL);
7368 return;
7369 }
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007370
7371 ftrace_func_mapper_remove_ip(mapper, ip);
7372}
7373
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007374static struct ftrace_probe_ops snapshot_probe_ops = {
7375 .func = ftrace_snapshot,
7376 .print = ftrace_snapshot_print,
7377};
7378
7379static struct ftrace_probe_ops snapshot_count_probe_ops = {
7380 .func = ftrace_count_snapshot,
7381 .print = ftrace_snapshot_print,
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04007382 .init = ftrace_snapshot_init,
7383 .free = ftrace_snapshot_free,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007384};
7385
7386static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04007387ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007388 char *glob, char *cmd, char *param, int enable)
7389{
7390 struct ftrace_probe_ops *ops;
7391 void *count = (void *)-1;
7392 char *number;
7393 int ret;
7394
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -04007395 if (!tr)
7396 return -ENODEV;
7397
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007398 /* hash funcs only work with set_ftrace_filter */
7399 if (!enable)
7400 return -EINVAL;
7401
7402 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7403
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04007404 if (glob[0] == '!')
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04007405 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007406
7407 if (!param)
7408 goto out_reg;
7409
7410 number = strsep(&param, ":");
7411
7412 if (!strlen(number))
7413 goto out_reg;
7414
7415 /*
7416 * We use the callback data field (which is a pointer)
7417 * as our counter.
7418 */
7419 ret = kstrtoul(number, 0, (unsigned long *)&count);
7420 if (ret)
7421 return ret;
7422
7423 out_reg:
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04007424 ret = tracing_alloc_snapshot_instance(tr);
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04007425 if (ret < 0)
7426 goto out;
7427
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04007428 ret = register_ftrace_function_probe(glob, tr, ops, count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007429
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04007430 out:
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007431 return ret < 0 ? ret : 0;
7432}
7433
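/*
 * Sketch of the resulting "snapshot" command syntax, as parsed by
 * ftrace_trace_snapshot_callback() above (paths assume tracefs is
 * mounted at /sys/kernel/tracing):
 *
 *   echo 'schedule:snapshot'   > set_ftrace_filter  # snapshot on every hit
 *   echo 'schedule:snapshot:3' > set_ftrace_filter  # only the first 3 hits
 *   echo '!schedule:snapshot'  > set_ftrace_filter  # remove the probe
 *
 * The optional ":count" becomes the probe's mapper counter; without it
 * the probe runs unlimited (snapshot_probe_ops vs snapshot_count_probe_ops).
 */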
7434static struct ftrace_func_command ftrace_snapshot_cmd = {
7435 .name = "snapshot",
7436 .func = ftrace_trace_snapshot_callback,
7437};
7438
Tom Zanussi38de93a2013-10-24 08:34:18 -05007439static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007440{
7441 return register_ftrace_command(&ftrace_snapshot_cmd);
7442}
7443#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05007444static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04007445#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007446
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007447static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007448{
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007449 if (WARN_ON(!tr->dir))
7450 return ERR_PTR(-ENODEV);
7451
7452 /* Top directory uses NULL as the parent */
7453 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7454 return NULL;
7455
7456 /* All sub buffers have a descriptor */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007457 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007458}
7459
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007460static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7461{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007462 struct dentry *d_tracer;
7463
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007464 if (tr->percpu_dir)
7465 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007466
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007467 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05007468 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007469 return NULL;
7470
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007471 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007472
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007473 WARN_ONCE(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007474 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007475
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007476 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007477}
7478
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007479static struct dentry *
7480trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7481 void *data, long cpu, const struct file_operations *fops)
7482{
7483 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7484
7485 if (ret) /* See tracing_get_cpu() */
David Howells7682c912015-03-17 22:26:16 +00007486 d_inode(ret)->i_cdev = (void *)(cpu + 1);
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007487 return ret;
7488}
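/*
 * Stashing cpu + 1 in i_cdev lets tracing_get_cpu() later recover the
 * CPU number from the inode alone; the + 1 keeps 0 free so that a NULL
 * i_cdev can mean "no specific CPU" (RING_BUFFER_ALL_CPUS).
 */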
7489
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007490static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007491tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007492{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007493 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007494 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04007495 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007496
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09007497 if (!d_percpu)
7498 return;
7499
Steven Rostedtdd49a382010-10-20 21:51:26 -04007500 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007501 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01007502 if (!d_cpu) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07007503 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01007504 return;
7505 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007506
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01007507 /* per cpu trace_pipe */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007508 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02007509 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007510
7511 /* per cpu trace */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007512 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007513 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04007514
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007515 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007516 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04007517
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007518 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02007519 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08007520
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007521 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007522 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007523
7524#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007525 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02007526 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007527
Oleg Nesterov649e9c702013-07-23 17:25:54 +02007528 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007529 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007530#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01007531}
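/*
 * The per-CPU layout created above, e.g. under per_cpu/cpu0/:
 * trace_pipe, trace, trace_pipe_raw, stats, buffer_size_kb and, when
 * CONFIG_TRACER_SNAPSHOT is set, snapshot and snapshot_raw. Each file
 * gets the owning trace_array as its data and the CPU number in i_cdev
 * via trace_create_cpu_file().
 */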
7532
Steven Rostedt60a11772008-05-12 21:20:44 +02007533#ifdef CONFIG_FTRACE_SELFTEST
7534/* Let selftest have access to static functions in this file */
7535#include "trace_selftest.c"
7536#endif
7537
Steven Rostedt577b7852009-02-26 23:43:05 -05007538static ssize_t
7539trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7540 loff_t *ppos)
7541{
7542 struct trace_option_dentry *topt = filp->private_data;
7543 char *buf;
7544
7545 if (topt->flags->val & topt->opt->bit)
7546 buf = "1\n";
7547 else
7548 buf = "0\n";
7549
7550 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7551}
7552
7553static ssize_t
7554trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7555 loff_t *ppos)
7556{
7557 struct trace_option_dentry *topt = filp->private_data;
7558 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05007559 int ret;
7560
Peter Huewe22fe9b52011-06-07 21:58:27 +02007561 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7562 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05007563 return ret;
7564
Li Zefan8d18eaa2009-12-08 11:17:06 +08007565 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05007566 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08007567
7568 if (!!(topt->flags->val & topt->opt->bit) != val) {
7569 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05007570 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05007571 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08007572 mutex_unlock(&trace_types_lock);
7573 if (ret)
7574 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05007575 }
7576
7577 *ppos += cnt;
7578
7579 return cnt;
7580}
7581
7582
7583static const struct file_operations trace_options_fops = {
7584 .open = tracing_open_generic,
7585 .read = trace_options_read,
7586 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007587 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05007588};
7589
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007590/*
7591 * In order to pass in both the trace_array descriptor as well as the index
7592 * to the flag that the trace option file represents, the trace_array
7593 * has a character array of trace_flags_index[], which holds the index
7594 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7595 * The address of this character array is passed to the flag option file
7596 * read/write callbacks.
7597 *
7598 * In order to extract both the index and the trace_array descriptor,
7599 * get_tr_index() uses the following algorithm.
7600 *
7601 * idx = *ptr;
7602 *
7603 * The pointer passed in points at one element of that index array, and
7604 * the element's value is its own position (remember, index[1] == 1).
7605 *
7606 * Then, to get the trace_array descriptor, we subtract that index
7607 * from the ptr, which lands us at the start of the array itself:
7608 *
7609 * ptr - idx == &index[0]
7610 *
7611 * Then a simple container_of() from that pointer gets us to the
7612 * trace_array descriptor.
7613 */
7614static void get_tr_index(void *data, struct trace_array **ptr,
7615 unsigned int *pindex)
7616{
7617 *pindex = *(unsigned char *)data;
7618
7619 *ptr = container_of(data - *pindex, struct trace_array,
7620 trace_flags_index);
7621}
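/*
 * Worked example with made-up addresses: if tr->trace_flags_index[]
 * starts at 0x1000, the option file for flag 5 stores data == 0x1005
 * and *data == 5. Then data - 5 == 0x1000 == &trace_flags_index[0],
 * and container_of() walks from that member back to the trace_array.
 */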
7622
Steven Rostedta8259072009-02-26 22:19:12 -05007623static ssize_t
7624trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7625 loff_t *ppos)
7626{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007627 void *tr_index = filp->private_data;
7628 struct trace_array *tr;
7629 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05007630 char *buf;
7631
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007632 get_tr_index(tr_index, &tr, &index);
7633
7634 if (tr->trace_flags & (1 << index))
Steven Rostedta8259072009-02-26 22:19:12 -05007635 buf = "1\n";
7636 else
7637 buf = "0\n";
7638
7639 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7640}
7641
7642static ssize_t
7643trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7644 loff_t *ppos)
7645{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007646 void *tr_index = filp->private_data;
7647 struct trace_array *tr;
7648 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05007649 unsigned long val;
7650 int ret;
7651
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007652 get_tr_index(tr_index, &tr, &index);
7653
Peter Huewe22fe9b52011-06-07 21:58:27 +02007654 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7655 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05007656 return ret;
7657
Zhaoleif2d84b62009-08-07 18:55:48 +08007658 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05007659 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04007660
7661 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007662 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04007663 mutex_unlock(&trace_types_lock);
Steven Rostedta8259072009-02-26 22:19:12 -05007664
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04007665 if (ret < 0)
7666 return ret;
7667
Steven Rostedta8259072009-02-26 22:19:12 -05007668 *ppos += cnt;
7669
7670 return cnt;
7671}
7672
Steven Rostedta8259072009-02-26 22:19:12 -05007673static const struct file_operations trace_options_core_fops = {
7674 .open = tracing_open_generic,
7675 .read = trace_options_core_read,
7676 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007677 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05007678};
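/*
 * Each core option file maps one bit of tr->trace_flags, e.g. (option
 * name picked here only for illustration):
 *
 *   echo 1 > options/sym-offset   # set_tracer_flag(tr, 1 << index, 1)
 *   echo 0 > options/sym-offset   # clear the bit again
 *
 * Anything other than "0" or "1" is rejected with -EINVAL.
 */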
7679
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007680struct dentry *trace_create_file(const char *name,
Al Virof4ae40a62011-07-24 04:33:43 -04007681 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007682 struct dentry *parent,
7683 void *data,
7684 const struct file_operations *fops)
7685{
7686 struct dentry *ret;
7687
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007688 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007689 if (!ret)
Joe Perchesa395d6a2016-03-22 14:28:09 -07007690 pr_warn("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007691
7692 return ret;
7693}
7694
7695
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007696static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05007697{
7698 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05007699
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007700 if (tr->options)
7701 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05007702
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05007703 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05007704 if (IS_ERR(d_tracer))
Steven Rostedta8259072009-02-26 22:19:12 -05007705 return NULL;
7706
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007707 tr->options = tracefs_create_dir("options", d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007708 if (!tr->options) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07007709 pr_warn("Could not create tracefs directory 'options'\n");
Steven Rostedta8259072009-02-26 22:19:12 -05007710 return NULL;
7711 }
7712
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007713 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05007714}
7715
Steven Rostedt577b7852009-02-26 23:43:05 -05007716static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007717create_trace_option_file(struct trace_array *tr,
7718 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05007719 struct tracer_flags *flags,
7720 struct tracer_opt *opt)
7721{
7722 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05007723
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007724 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05007725 if (!t_options)
7726 return;
7727
7728 topt->flags = flags;
7729 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007730 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05007731
Frederic Weisbecker5452af62009-03-27 00:25:38 +01007732 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05007733 &trace_options_fops);
7734
Steven Rostedt577b7852009-02-26 23:43:05 -05007735}
7736
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007737static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007738create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05007739{
7740 struct trace_option_dentry *topts;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007741 struct trace_options *tr_topts;
Steven Rostedt577b7852009-02-26 23:43:05 -05007742 struct tracer_flags *flags;
7743 struct tracer_opt *opts;
7744 int cnt;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007745 int i;
Steven Rostedt577b7852009-02-26 23:43:05 -05007746
7747 if (!tracer)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007748 return;
Steven Rostedt577b7852009-02-26 23:43:05 -05007749
7750 flags = tracer->flags;
7751
7752 if (!flags || !flags->opts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007753 return;
7754
7755 /*
7756 * If this is an instance, only create flags for tracers
7757 * the instance may have.
7758 */
7759 if (!trace_ok_for_array(tracer, tr))
7760 return;
7761
7762 for (i = 0; i < tr->nr_topts; i++) {
Chunyu Hud39cdd22016-03-08 21:37:01 +08007763 /* Make sure there are no duplicate flags. */
7764 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007765 return;
7766 }
Steven Rostedt577b7852009-02-26 23:43:05 -05007767
7768 opts = flags->opts;
7769
7770 for (cnt = 0; opts[cnt].name; cnt++)
7771 ;
7772
Steven Rostedt0cfe8242009-02-27 10:51:10 -05007773 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05007774 if (!topts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04007775 return;
7776
7777 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7778 GFP_KERNEL);
7779 if (!tr_topts) {
7780 kfree(topts);
7781 return;
7782 }
7783
7784 tr->topts = tr_topts;
7785 tr->topts[tr->nr_topts].tracer = tracer;
7786 tr->topts[tr->nr_topts].topts = topts;
7787 tr->nr_topts++;
Steven Rostedt577b7852009-02-26 23:43:05 -05007788
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04007789 for (cnt = 0; opts[cnt].name; cnt++) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007790 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05007791 &opts[cnt]);
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04007792 WARN_ONCE(topts[cnt].entry == NULL,
7793 "Failed to create trace option: %s",
7794 opts[cnt].name);
7795 }
Steven Rostedt577b7852009-02-26 23:43:05 -05007796}
7797
Steven Rostedta8259072009-02-26 22:19:12 -05007798static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007799create_trace_option_core_file(struct trace_array *tr,
7800 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05007801{
7802 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05007803
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007804 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05007805 if (!t_options)
7806 return NULL;
7807
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04007808 return trace_create_file(option, 0644, t_options,
7809 (void *)&tr->trace_flags_index[index],
7810 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05007811}
7812
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007813static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05007814{
7815 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007816 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05007817 int i;
7818
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007819 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05007820 if (!t_options)
7821 return;
7822
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04007823 for (i = 0; trace_options[i]; i++) {
7824 if (top_level ||
7825 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7826 create_trace_option_core_file(tr, trace_options[i], i);
7827 }
Steven Rostedta8259072009-02-26 22:19:12 -05007828}
7829
Steven Rostedt499e5472012-02-22 15:50:28 -05007830static ssize_t
7831rb_simple_read(struct file *filp, char __user *ubuf,
7832 size_t cnt, loff_t *ppos)
7833{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04007834 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05007835 char buf[64];
7836 int r;
7837
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04007838 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05007839 r = sprintf(buf, "%d\n", r);
7840
7841 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7842}
7843
7844static ssize_t
7845rb_simple_write(struct file *filp, const char __user *ubuf,
7846 size_t cnt, loff_t *ppos)
7847{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04007848 struct trace_array *tr = filp->private_data;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007849 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05007850 unsigned long val;
7851 int ret;
7852
7853 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7854 if (ret)
7855 return ret;
7856
7857 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05007858 mutex_lock(&trace_types_lock);
Steven Rostedt (VMware)f1436412018-08-01 15:40:57 -04007859 if (!!val == tracer_tracing_is_on(tr)) {
7860 val = 0; /* do nothing */
7861 } else if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04007862 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007863 if (tr->current_trace->start)
7864 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05007865 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04007866 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007867 if (tr->current_trace->stop)
7868 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05007869 }
7870 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05007871 }
7872
7873 (*ppos)++;
7874
7875 return cnt;
7876}
7877
7878static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007879 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05007880 .read = rb_simple_read,
7881 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007882 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05007883 .llseek = default_llseek,
7884};
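/*
 * Semantics of the tracing_on file backed by rb_simple_fops:
 *
 *   echo 0 > tracing_on   # tracer_tracing_off() + current_trace->stop()
 *   echo 1 > tracing_on   # tracer_tracing_on()  + current_trace->start()
 *   cat tracing_on        # prints the current 0/1 state
 *
 * Writing the state the buffer is already in is deliberately a no-op.
 */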
7885
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05007886static ssize_t
7887buffer_percent_read(struct file *filp, char __user *ubuf,
7888 size_t cnt, loff_t *ppos)
7889{
7890 struct trace_array *tr = filp->private_data;
7891 char buf[64];
7892 int r;
7893
7894 r = tr->buffer_percent;
7895 r = sprintf(buf, "%d\n", r);
7896
7897 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7898}
7899
7900static ssize_t
7901buffer_percent_write(struct file *filp, const char __user *ubuf,
7902 size_t cnt, loff_t *ppos)
7903{
7904 struct trace_array *tr = filp->private_data;
7905 unsigned long val;
7906 int ret;
7907
7908 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7909 if (ret)
7910 return ret;
7911
7912 if (val > 100)
7913 return -EINVAL;
7914
7915 if (!val)
7916 val = 1;
7917
7918 tr->buffer_percent = val;
7919
7920 (*ppos)++;
7921
7922 return cnt;
7923}
7924
7925static const struct file_operations buffer_percent_fops = {
7926 .open = tracing_open_generic_tr,
7927 .read = buffer_percent_read,
7928 .write = buffer_percent_write,
7929 .release = tracing_release_generic_tr,
7930 .llseek = default_llseek,
7931};
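/*
 * buffer_percent controls how full the ring buffer should be before a
 * blocked reader (e.g. on trace_pipe) is woken. As written above, the
 * value is kept in 1..100: a write of 0 is bumped up to 1, and values
 * over 100 get -EINVAL. For example:
 *
 *   echo 50 > buffer_percent   # wake readers once half full
 */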
7932
Steven Rostedt277ba042012-08-03 16:10:49 -04007933struct dentry *trace_instance_dir;
7934
7935static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007936init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
Steven Rostedt277ba042012-08-03 16:10:49 -04007937
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007938static int
7939allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04007940{
7941 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007942
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007943 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007944
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05007945 buf->tr = tr;
7946
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007947 buf->buffer = ring_buffer_alloc(size, rb_flags);
7948 if (!buf->buffer)
7949 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007950
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007951 buf->data = alloc_percpu(struct trace_array_cpu);
7952 if (!buf->data) {
7953 ring_buffer_free(buf->buffer);
Steven Rostedt (VMware)4397f042017-12-26 20:07:34 -05007954 buf->buffer = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007955 return -ENOMEM;
7956 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007957
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007958 /* Allocate the first page for all buffers */
7959 set_buffer_entries(&tr->trace_buffer,
7960 ring_buffer_size(tr->trace_buffer.buffer, 0));
7961
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007962 return 0;
7963}
7964
7965static int allocate_trace_buffers(struct trace_array *tr, int size)
7966{
7967 int ret;
7968
7969 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7970 if (ret)
7971 return ret;
7972
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007973#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007974 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7975 allocate_snapshot ? size : 1);
7976 if (WARN_ON(ret)) {
7977 ring_buffer_free(tr->trace_buffer.buffer);
Jing Xia24f2aaf2017-12-26 15:12:53 +08007978 tr->trace_buffer.buffer = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007979 free_percpu(tr->trace_buffer.data);
Jing Xia24f2aaf2017-12-26 15:12:53 +08007980 tr->trace_buffer.data = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007981 return -ENOMEM;
7982 }
7983 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007984
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05007985 /*
7986 * Only the top level trace array gets its snapshot allocated
7987 * from the kernel command line.
7988 */
7989 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007990#endif
7991 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007992}
7993
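/*
 * A trace_array thus owns one or two ring buffers: trace_buffer always
 * gets the full requested size, while the CONFIG_TRACER_MAX_TRACE
 * max_buffer (used for snapshots and max-latency swaps) starts out
 * minimal, at a single page, unless "alloc_snapshot" was given on the
 * kernel command line, and is grown on first use.
 */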
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04007994static void free_trace_buffer(struct trace_buffer *buf)
7995{
7996 if (buf->buffer) {
7997 ring_buffer_free(buf->buffer);
7998 buf->buffer = NULL;
7999 free_percpu(buf->data);
8000 buf->data = NULL;
8001 }
8002}
8003
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008004static void free_trace_buffers(struct trace_array *tr)
8005{
8006 if (!tr)
8007 return;
8008
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04008009 free_trace_buffer(&tr->trace_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008010
8011#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04008012 free_trace_buffer(&tr->max_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008013#endif
8014}
8015
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008016static void init_trace_flags_index(struct trace_array *tr)
8017{
8018 int i;
8019
8020 /* Used by the trace options files */
8021 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8022 tr->trace_flags_index[i] = i;
8023}
8024
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008025static void __update_tracer_options(struct trace_array *tr)
8026{
8027 struct tracer *t;
8028
8029 for (t = trace_types; t; t = t->next)
8030 add_tracer_options(tr, t);
8031}
8032
8033static void update_tracer_options(struct trace_array *tr)
8034{
8035 mutex_lock(&trace_types_lock);
8036 __update_tracer_options(tr);
8037 mutex_unlock(&trace_types_lock);
8038}
8039
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05008040static int instance_mkdir(const char *name)
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008041{
Steven Rostedt277ba042012-08-03 16:10:49 -04008042 struct trace_array *tr;
8043 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04008044
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04008045 mutex_lock(&event_mutex);
Steven Rostedt277ba042012-08-03 16:10:49 -04008046 mutex_lock(&trace_types_lock);
8047
8048 ret = -EEXIST;
8049 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8050 if (tr->name && strcmp(tr->name, name) == 0)
8051 goto out_unlock;
8052 }
8053
8054 ret = -ENOMEM;
8055 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8056 if (!tr)
8057 goto out_unlock;
8058
8059 tr->name = kstrdup(name, GFP_KERNEL);
8060 if (!tr->name)
8061 goto out_free_tr;
8062
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008063 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8064 goto out_free_tr;
8065
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04008066 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008067
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008068 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8069
Steven Rostedt277ba042012-08-03 16:10:49 -04008070 raw_spin_lock_init(&tr->start_lock);
8071
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05008072 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8073
Steven Rostedt277ba042012-08-03 16:10:49 -04008074 tr->current_trace = &nop_trace;
8075
8076 INIT_LIST_HEAD(&tr->systems);
8077 INIT_LIST_HEAD(&tr->events);
Tom Zanussi067fe032018-01-15 20:51:56 -06008078 INIT_LIST_HEAD(&tr->hist_vars);
Steven Rostedt277ba042012-08-03 16:10:49 -04008079
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008080 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04008081 goto out_free_tr;
8082
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008083 tr->dir = tracefs_create_dir(name, trace_instance_dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04008084 if (!tr->dir)
8085 goto out_free_tr;
8086
8087 ret = event_trace_add_tracer(tr->dir, tr);
Alexander Z Lam609e85a2013-07-10 17:34:34 -07008088 if (ret) {
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008089 tracefs_remove_recursive(tr->dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04008090 goto out_free_tr;
Alexander Z Lam609e85a2013-07-10 17:34:34 -07008091 }
Steven Rostedt277ba042012-08-03 16:10:49 -04008092
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04008093 ftrace_init_trace_array(tr);
8094
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008095 init_tracer_tracefs(tr, tr->dir);
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008096 init_trace_flags_index(tr);
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008097 __update_tracer_options(tr);
Steven Rostedt277ba042012-08-03 16:10:49 -04008098
8099 list_add(&tr->list, &ftrace_trace_arrays);
8100
8101 mutex_unlock(&trace_types_lock);
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04008102 mutex_unlock(&event_mutex);
Steven Rostedt277ba042012-08-03 16:10:49 -04008103
8104 return 0;
8105
8106 out_free_tr:
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008107 free_trace_buffers(tr);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008108 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04008109 kfree(tr->name);
8110 kfree(tr);
8111
8112 out_unlock:
8113 mutex_unlock(&trace_types_lock);
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04008114 mutex_unlock(&event_mutex);
Steven Rostedt277ba042012-08-03 16:10:49 -04008115
8116 return ret;
8117
8118}
8119
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05008120static int instance_rmdir(const char *name)
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008121{
8122 struct trace_array *tr;
8123 int found = 0;
8124 int ret;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008125 int i;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008126
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04008127 mutex_lock(&event_mutex);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008128 mutex_lock(&trace_types_lock);
8129
8130 ret = -ENODEV;
8131 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8132 if (tr->name && strcmp(tr->name, name) == 0) {
8133 found = 1;
8134 break;
8135 }
8136 }
8137 if (!found)
8138 goto out_unlock;
8139
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05008140 ret = -EBUSY;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05008141 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05008142 goto out_unlock;
8143
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008144 list_del(&tr->list);
8145
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04008146 /* Disable all the flags that were enabled coming in */
8147 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8148 if ((1 << i) & ZEROED_TRACE_FLAGS)
8149 set_tracer_flag(tr, 1 << i, 0);
8150 }
8151
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05008152 tracing_set_nop(tr);
Naveen N. Raoa0e63692017-05-16 23:21:26 +05308153 clear_ftrace_function_probes(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008154 event_trace_del_tracer(tr);
Namhyung Kimd879d0b2017-04-17 11:44:27 +09008155 ftrace_clear_pids(tr);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05008156 ftrace_destroy_function_files(tr);
Jiaxing Wang681a4a22015-10-18 19:58:08 +08008157 tracefs_remove_recursive(tr->dir);
Steven Rostedt (Red Hat)a9fcaaa2014-06-06 23:17:28 -04008158 free_trace_buffers(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008159
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008160 for (i = 0; i < tr->nr_topts; i++) {
8161 kfree(tr->topts[i].topts);
8162 }
8163 kfree(tr->topts);
8164
Chunyu Hudb9108e02017-07-20 18:36:09 +08008165 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008166 kfree(tr->name);
8167 kfree(tr);
8168
8169 ret = 0;
8170
8171 out_unlock:
8172 mutex_unlock(&trace_types_lock);
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04008173 mutex_unlock(&event_mutex);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04008174
8175 return ret;
8176}
8177
Steven Rostedt277ba042012-08-03 16:10:49 -04008178static __init void create_trace_instances(struct dentry *d_tracer)
8179{
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05008180 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8181 instance_mkdir,
8182 instance_rmdir);
Steven Rostedt277ba042012-08-03 16:10:49 -04008183 if (WARN_ON(!trace_instance_dir))
8184 return;
Steven Rostedt277ba042012-08-03 16:10:49 -04008185}
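/*
 * From user space the instances directory works roughly like this
 * (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   mkdir /sys/kernel/tracing/instances/foo   -> instance_mkdir("foo")
 *   rmdir /sys/kernel/tracing/instances/foo   -> instance_rmdir("foo")
 *
 * Each instance has its own ring buffers, options and event files; the
 * rmdir fails with -EBUSY while the instance still holds references.
 */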
8186
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008187static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008188init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008189{
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04008190 struct trace_event_file *file;
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008191 int cpu;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008192
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05008193 trace_create_file("available_tracers", 0444, d_tracer,
8194 tr, &show_traces_fops);
8195
8196 trace_create_file("current_tracer", 0644, d_tracer,
8197 tr, &set_tracer_fops);
8198
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008199 trace_create_file("tracing_cpumask", 0644, d_tracer,
8200 tr, &tracing_cpumask_fops);
8201
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008202 trace_create_file("trace_options", 0644, d_tracer,
8203 tr, &tracing_iter_fops);
8204
8205 trace_create_file("trace", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008206 tr, &tracing_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008207
8208 trace_create_file("trace_pipe", 0444, d_tracer,
Oleg Nesterov15544202013-07-23 17:25:57 +02008209 tr, &tracing_pipe_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008210
8211 trace_create_file("buffer_size_kb", 0644, d_tracer,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02008212 tr, &tracing_entries_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008213
8214 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8215 tr, &tracing_total_entries_fops);
8216
Wang YanQing238ae932013-05-26 16:52:01 +08008217 trace_create_file("free_buffer", 0200, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008218 tr, &tracing_free_buffer_fops);
8219
8220 trace_create_file("trace_marker", 0220, d_tracer,
8221 tr, &tracing_mark_fops);
8222
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04008223 file = __find_event_file(tr, "ftrace", "print");
8224 if (file && file->dir)
8225 trace_create_file("trigger", 0644, file->dir, file,
8226 &event_trigger_fops);
8227 tr->trace_marker_file = file;
8228
Steven Rostedtfa32e852016-07-06 15:25:08 -04008229 trace_create_file("trace_marker_raw", 0220, d_tracer,
8230 tr, &tracing_mark_raw_fops);
8231
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008232 trace_create_file("trace_clock", 0644, d_tracer, tr,
8233 &trace_clock_fops);
8234
8235 trace_create_file("tracing_on", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008236 tr, &rb_simple_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05008237
Tom Zanussi2c1ea602018-01-15 20:51:41 -06008238 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8239 &trace_time_stamp_mode_fops);
8240
Steven Rostedt (VMware)a7b1d742018-11-29 22:36:47 -05008241 tr->buffer_percent = 50;
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05008242
8243 trace_create_file("buffer_percent", 0644, d_tracer,
8244 tr, &buffer_percent_fops);
8245
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008246 create_trace_options_dir(tr);
8247
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04008248#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05008249 trace_create_file("tracing_max_latency", 0644, d_tracer,
8250 &tr->max_latency, &tracing_max_lat_fops);
8251#endif
8252
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05008253 if (ftrace_create_function_files(tr, d_tracer))
8254 WARN(1, "Could not allocate function filter files");
8255
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05008256#ifdef CONFIG_TRACER_SNAPSHOT
8257 trace_create_file("snapshot", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008258 tr, &snapshot_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05008259#endif
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008260
8261 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008262 tracing_init_tracefs_percpu(tr, cpu);
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05008263
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04008264 ftrace_init_tracefs(tr, d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008265}
8266
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13008267static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05008268{
8269 struct vfsmount *mnt;
8270 struct file_system_type *type;
8271
8272 /*
8273 * To maintain backward compatibility for tools that mount
8274 * debugfs to get to the tracing facility, tracefs is automatically
8275 * mounted to the debugfs/tracing directory.
8276 */
8277 type = get_fs_type("tracefs");
8278 if (!type)
8279 return NULL;
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13008280 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05008281 put_filesystem(type);
8282 if (IS_ERR(mnt))
8283 return NULL;
8284 mntget(mnt);
8285
8286 return mnt;
8287}
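/*
 * The net effect: a "cd /sys/kernel/debug/tracing" on a kernel with
 * debugfs triggers this automount callback and transparently mounts a
 * tracefs instance there, so legacy tool paths keep working alongside
 * the native /sys/kernel/tracing mount point.
 */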
8288
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008289/**
8290 * tracing_init_dentry - initialize top level trace array
8291 *
8292 * This is called when creating files or directories in the tracing
8293 * directory. It is called via fs_initcall() by any of the boot-up code
8294 * and is expected to return the dentry of the top-level tracing directory.
8295 */
8296struct dentry *tracing_init_dentry(void)
8297{
8298 struct trace_array *tr = &global_trace;
8299
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05008300 /* The top level trace array uses NULL as parent */
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008301 if (tr->dir)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05008302 return NULL;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008303
Jiaxing Wang8b129192015-11-06 16:04:16 +08008304 if (WARN_ON(!tracefs_initialized()) ||
8305 (IS_ENABLED(CONFIG_DEBUG_FS) &&
8306 WARN_ON(!debugfs_initialized())))
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008307 return ERR_PTR(-ENODEV);
8308
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05008309 /*
8310 * As there may still be users that expect the tracing
8311 * files to exist in debugfs/tracing, we must automount
8312 * the tracefs file system there, so older tools still
8313 * work with the newer kernel.
8314 */
8315 tr->dir = debugfs_create_automount("tracing", NULL,
8316 trace_automount, NULL);
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008317 if (!tr->dir) {
8318 pr_warn_once("Could not create debugfs directory 'tracing'\n");
8319 return ERR_PTR(-ENOMEM);
8320 }
8321
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008322 return NULL;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008323}
8324
Jeremy Linton00f4b652017-05-31 16:56:43 -05008325extern struct trace_eval_map *__start_ftrace_eval_maps[];
8326extern struct trace_eval_map *__stop_ftrace_eval_maps[];
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04008327
Jeremy Linton5f60b352017-05-31 16:56:47 -05008328static void __init trace_eval_init(void)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04008329{
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008330 int len;
8331
Jeremy Linton02fd7f62017-05-31 16:56:42 -05008332 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008333 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04008334}
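/*
 * __start_ftrace_eval_maps and __stop_ftrace_eval_maps are
 * linker-provided section bounds; the maps let tools resolve enum and
 * sizeof() symbols that appear in event print formats into their
 * numeric values.
 */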
8335
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008336#ifdef CONFIG_MODULES
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008337static void trace_module_add_evals(struct module *mod)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008338{
Jeremy Linton99be6472017-05-31 16:56:44 -05008339 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008340 return;
8341
8342 /*
8343 * Modules with bad taint do not have events created; do
8344 * not bother with their eval maps either.
8345 */
8346 if (trace_module_has_bad_taint(mod))
8347 return;
8348
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008349 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008350}
8351
Jeremy Linton681bec02017-05-31 16:56:53 -05008352#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008353static void trace_module_remove_evals(struct module *mod)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008354{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05008355 union trace_eval_map_item *map;
8356 union trace_eval_map_item **last = &trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008357
Jeremy Linton99be6472017-05-31 16:56:44 -05008358 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008359 return;
8360
Jeremy Linton1793ed92017-05-31 16:56:46 -05008361 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008362
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05008363 map = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008364
8365 while (map) {
8366 if (map->head.mod == mod)
8367 break;
Jeremy Linton5f60b352017-05-31 16:56:47 -05008368 map = trace_eval_jmp_to_tail(map);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008369 last = &map->tail.next;
8370 map = map->tail.next;
8371 }
8372 if (!map)
8373 goto out;
8374
Jeremy Linton5f60b352017-05-31 16:56:47 -05008375 *last = trace_eval_jmp_to_tail(map)->tail.next;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008376 kfree(map);
8377 out:
Jeremy Linton1793ed92017-05-31 16:56:46 -05008378 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008379}
8380#else
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008381static inline void trace_module_remove_evals(struct module *mod) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05008382#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008383
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008384static int trace_module_notify(struct notifier_block *self,
8385 unsigned long val, void *data)
8386{
8387 struct module *mod = data;
8388
8389 switch (val) {
8390 case MODULE_STATE_COMING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008391 trace_module_add_evals(mod);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008392 break;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008393 case MODULE_STATE_GOING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008394 trace_module_remove_evals(mod);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008395 break;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008396 }
8397
8398 return 0;
8399}
8400
8401static struct notifier_block trace_module_nb = {
8402 .notifier_call = trace_module_notify,
8403 .priority = 0,
8404};
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008405#endif /* CONFIG_MODULES */
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008406
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008407static __init int tracer_init_tracefs(void)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008408{
8409 struct dentry *d_tracer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008410
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08008411 trace_access_lock_init();
8412
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008413 d_tracer = tracing_init_dentry();
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05008414 if (IS_ERR(d_tracer))
Namhyung Kimed6f1c92013-04-10 09:18:12 +09008415 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008416
Steven Rostedt (VMware)58b92542018-05-08 15:09:27 -04008417 event_trace_init();
8418
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008419 init_tracer_tracefs(&global_trace, d_tracer);
Steven Rostedt (Red Hat)501c2372016-07-05 10:04:34 -04008420 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008421
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008422 trace_create_file("tracing_thresh", 0644, d_tracer,
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04008423 &global_trace, &tracing_thresh_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008424
Li Zefan339ae5d2009-04-17 10:34:30 +08008425 trace_create_file("README", 0444, d_tracer,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008426 NULL, &tracing_readme_fops);
Ingo Molnar7bd2f242008-05-12 21:20:45 +02008427
Avadh Patel69abe6a2009-04-10 16:04:48 -04008428 trace_create_file("saved_cmdlines", 0444, d_tracer,
8429 NULL, &tracing_saved_cmdlines_fops);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03008430
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09008431 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8432 NULL, &tracing_saved_cmdlines_size_fops);
8433
Michael Sartain99c621d2017-07-05 22:07:15 -06008434 trace_create_file("saved_tgids", 0444, d_tracer,
8435 NULL, &tracing_saved_tgids_fops);
8436
Jeremy Linton5f60b352017-05-31 16:56:47 -05008437 trace_eval_init();
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04008438
Jeremy Lintonf57a4142017-05-31 16:56:48 -05008439 trace_create_eval_file(d_tracer);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04008440
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04008441#ifdef CONFIG_MODULES
8442 register_module_notifier(&trace_module_nb);
8443#endif
8444
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008445#ifdef CONFIG_DYNAMIC_FTRACE
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008446 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8447 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008448#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008449
Steven Rostedt277ba042012-08-03 16:10:49 -04008450 create_trace_instances(d_tracer);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09008451
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008452 update_tracer_options(&global_trace);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05008453
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01008454 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008455}
8456
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008457static int trace_panic_handler(struct notifier_block *this,
8458 unsigned long event, void *unused)
8459{
Steven Rostedt944ac422008-10-23 19:26:08 -04008460 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02008461 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008462 return NOTIFY_OK;
8463}
8464
8465static struct notifier_block trace_panic_notifier = {
8466 .notifier_call = trace_panic_handler,
8467 .next = NULL,
8468 .priority = 150 /* priority: INT_MAX >= x >= 0 */
8469};
8470
8471static int trace_die_handler(struct notifier_block *self,
8472 unsigned long val,
8473 void *data)
8474{
8475 switch (val) {
8476 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04008477 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02008478 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008479 break;
8480 default:
8481 break;
8482 }
8483 return NOTIFY_OK;
8484}
8485
8486static struct notifier_block trace_die_notifier = {
8487 .notifier_call = trace_die_handler,
8488 .priority = 200
8489};
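/*
 * Both notifiers key off ftrace_dump_on_oops, e.g. booting with
 * "ftrace_dump_on_oops" (or setting the sysctl of the same name) dumps
 * every CPU's buffer on an oops, while "ftrace_dump_on_oops=orig_cpu"
 * maps to DUMP_ORIG and dumps only the CPU that hit the oops.
 */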
8490
8491/*
8492 * printk is set to a max of 1024; we really don't need it that big.
8493 * Nothing should be printing 1000 characters anyway.
8494 */
8495#define TRACE_MAX_PRINT 1000
8496
8497/*
8498 * Define here KERN_TRACE so that we have one place to modify
8499 * it if we decide to change what log level the ftrace dump
8500 * should be at.
8501 */
Steven Rostedt428aee12009-01-14 12:24:42 -05008502#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008503
Jason Wessel955b61e2010-08-05 09:22:23 -05008504void
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008505trace_printk_seq(struct trace_seq *s)
8506{
8507 /* Probably should print a warning here. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04008508 if (s->seq.len >= TRACE_MAX_PRINT)
8509 s->seq.len = TRACE_MAX_PRINT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008510
Steven Rostedt (Red Hat)820b75f2014-11-19 10:56:41 -05008511 /*
8512 * More paranoid code. Although the buffer size is set to
8513 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8514 * an extra layer of protection.
8515 */
8516 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8517 s->seq.len = s->seq.size - 1;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008518
8519 /* should be zero terminated, but we are paranoid. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04008520 s->buffer[s->seq.len] = 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008521
8522 printk(KERN_TRACE "%s", s->buffer);
8523
Steven Rostedtf9520752009-03-02 14:04:40 -05008524 trace_seq_init(s);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04008525}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}
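
/*
 * Illustrative sketch: this is roughly how a debugger back end walks
 * the buffer after trace_init_global_iter(), mirroring the loop in
 * ftrace_dump() below. Simplified and hypothetical; real callers also
 * disable the per-cpu buffers around the walk.
 */
static void example_walk_buffer(void)
{
	/* static: a trace_iterator is too big for the stack */
	static struct trace_iterator iter;

	trace_init_global_iter(&iter);
	iter.iter_flags |= TRACE_FILE_LAT_FMT;
	iter.pos = -1;

	while (trace_find_next_entry_inc(&iter)) {
		if (print_trace_line(&iter) != TRACE_TYPE_NO_CONSUME)
			trace_consume(&iter);
		trace_printk_seq(&iter.seq);
	}
}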

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);
	printk_nmi_direct_enter();

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We read everything we can,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	printk_nmi_direct_exit();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
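
/*
 * Illustrative sketch (hypothetical caller): a subsystem can dump the
 * ring buffer from its own fatal-error path. DUMP_ORIG limits the
 * dump to the CPU that hit the error; DUMP_ALL dumps every CPU.
 * Concurrent calls are serialized by dump_running above.
 */
static void example_fatal_error(void)
{
	pr_emerg("example: fatal state, dumping ftrace buffer\n");
	ftrace_dump(DUMP_ORIG);
}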

int trace_run_command(const char *buf, int (*createfn)(int, char **))
{
	char **argv;
	int argc, ret;

	argc = 0;
	ret = 0;
	argv = argv_split(GFP_KERNEL, buf, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = createfn(argc, argv);

	argv_free(argv);

	return ret;
}
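
/*
 * Illustrative sketch: a minimal createfn for trace_run_command().
 * It receives the command already split into an argv array, the way
 * the probe-definition parsers consume it. The name is hypothetical.
 */
static int example_createfn(int argc, char **argv)
{
	int i;

	for (i = 0; i < argc; i++)
		pr_info("example arg[%d]: %s\n", i, argv[i]);

	return 0;
}

/* e.g.: trace_run_command("p:myprobe do_sys_open", example_createfn); */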

#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(int, char **))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = trace_run_command(buf, createfn);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
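
/*
 * Illustrative sketch: trace_parse_run_command() is meant to back a
 * tracefs write handler, in the style of the kprobe_events and
 * uprobe_events files. The example_* names are hypothetical;
 * example_createfn is the sketch above.
 */
static ssize_t example_probes_write(struct file *file,
				    const char __user *buffer,
				    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       example_createfn);
}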

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callback allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}
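
/*
 * Example (hypothetical boot command line): tp_printk sets
 * tracepoint_printk above, and only produces output when combined
 * with boot-time event tracing, e.g.:
 *
 *	trace_event=sched:sched_switch tp_printk
 *
 * which routes sched_switch tracepoint hits through printk at boot.
 */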

void __init trace_init(void)
{
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer string lives in an init section.
	 * This function is called from a late initcall. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static int tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}

	return 0;
}
late_initcall_sync(tracing_set_default_clock);
#endif