// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If boot-time tracing including tracers/events via kernel cmdline
 * is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
void __init disable_tracing_selftest(const char *reason)
{
        if (!tracing_selftest_disabled) {
                tracing_selftest_disabled = true;
                pr_info("Ftrace startup test is disabled due to %s\n", reason);
        }
}
#endif

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
        { }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
        struct module                   *mod;
        unsigned long                   length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
        /*
         * "end" is first and points to NULL as it must be different
         * than "mod" or "eval_string"
         */
        union trace_eval_map_item       *next;
        const char                      *end;   /* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
        struct trace_eval_map           map;
        struct trace_eval_map_head      head;
        struct trace_eval_map_tail      tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
                                   struct trace_buffer *buffer,
                                   unsigned int trace_ctx);

#define MAX_TRACER_SIZE         100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
        strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
        /* We are using ftrace early, expand it */
        ring_buffer_expanded = true;
        return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
        if (*str++ != '=' || !*str) {
                ftrace_dump_on_oops = DUMP_ALL;
                return 1;
        }

        if (!strcmp("orig_cpu", str)) {
                ftrace_dump_on_oops = DUMP_ORIG;
                return 1;
        }

        return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                __disable_trace_on_warning = 1;
        return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
        allocate_snapshot = true;
        /* We also need the main ring buffer expanded */
        ring_buffer_expanded = true;
        return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
        strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
        return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
        strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
        trace_boot_clock = trace_boot_clock_buf;
        return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                tracepoint_printk = 1;
        return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}

static void
trace_process_export(struct trace_export *export,
               struct ring_buffer_event *event, int flag)
{
        struct trace_entry *entry;
        unsigned int size = 0;

        if (export->flags & flag) {
                entry = ring_buffer_event_data(event);
                size = ring_buffer_event_length(event);
                export->write(export, entry, size);
        }
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);

static inline void ftrace_exports_enable(struct trace_export *export)
{
        if (export->flags & TRACE_EXPORT_FUNCTION)
                static_branch_inc(&trace_function_exports_enabled);

        if (export->flags & TRACE_EXPORT_EVENT)
                static_branch_inc(&trace_event_exports_enabled);

        if (export->flags & TRACE_EXPORT_MARKER)
                static_branch_inc(&trace_marker_exports_enabled);
}

static inline void ftrace_exports_disable(struct trace_export *export)
{
        if (export->flags & TRACE_EXPORT_FUNCTION)
                static_branch_dec(&trace_function_exports_enabled);

        if (export->flags & TRACE_EXPORT_EVENT)
                static_branch_dec(&trace_event_exports_enabled);

        if (export->flags & TRACE_EXPORT_MARKER)
                static_branch_dec(&trace_marker_exports_enabled);
}

static void ftrace_exports(struct ring_buffer_event *event, int flag)
{
        struct trace_export *export;

        preempt_disable_notrace();

        export = rcu_dereference_raw_check(ftrace_exports_list);
        while (export) {
                trace_process_export(export, event, flag);
                export = rcu_dereference_raw_check(export->next);
        }

        preempt_enable_notrace();
}

static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
        rcu_assign_pointer(export->next, *list);
        /*
         * We are entering export into the list but another
         * CPU might be walking that list. We need to make sure
         * the export->next pointer is valid before another CPU sees
         * the export pointer included in the list.
         */
        rcu_assign_pointer(*list, export);
}

static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
        struct trace_export **p;

        for (p = list; *p != NULL; p = &(*p)->next)
                if (*p == export)
                        break;

        if (*p != export)
                return -1;

        rcu_assign_pointer(*p, (*p)->next);

        return 0;
}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
        ftrace_exports_enable(export);

        add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
        int ret;

        ret = rm_trace_export(list, export);
        ftrace_exports_disable(export);

        return ret;
}

int register_ftrace_export(struct trace_export *export)
{
        if (WARN_ON_ONCE(!export->write))
                return -1;

        mutex_lock(&ftrace_export_lock);

        add_ftrace_export(&ftrace_exports_list, export);

        mutex_unlock(&ftrace_export_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
        int ret;

        mutex_lock(&ftrace_export_lock);

        ret = rm_ftrace_export(&ftrace_exports_list, export);

        mutex_unlock(&ftrace_export_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
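
/*
 * Minimal sketch of a trace_export client (the names are hypothetical
 * and not part of this file): supply a write() callback, pick the event
 * classes via flags, then register/unregister the export.
 *
 *      static void my_export_write(struct trace_export *export,
 *                                  const void *entry, unsigned int len)
 *      {
 *              (forward the raw trace entry to an out-of-band sink)
 *      }
 *
 *      static struct trace_export my_export = {
 *              .write  = my_export_write,
 *              .flags  = TRACE_EXPORT_FUNCTION | TRACE_EXPORT_EVENT,
 *      };
 *
 *      register_ftrace_export(&my_export);
 *      ...
 *      unregister_ftrace_export(&my_export);
 */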

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS                                             \
        (FUNCTION_DEFAULT_FLAGS |                                       \
         TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |                  \
         TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |                \
         TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |                 \
         TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |                     \
         TRACE_ITER_HASH_PTR)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |                      \
               TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
        (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
        .trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
        struct trace_array *tr;
        int ret = -ENODEV;

        mutex_lock(&trace_types_lock);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                if (tr == this_tr) {
                        tr->ref++;
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&trace_types_lock);

        return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
        WARN_ON(!this_tr->ref);
        this_tr->ref--;
}

/**
 * trace_array_put - Decrement the reference counter for this trace array.
 * @this_tr: pointer to the trace array
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 *
 */
void trace_array_put(struct trace_array *this_tr)
{
        if (!this_tr)
                return;

        mutex_lock(&trace_types_lock);
        __trace_array_put(this_tr);
        mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);

int tracing_check_open_get_tr(struct trace_array *tr)
{
        int ret;

        ret = security_locked_down(LOCKDOWN_TRACEFS);
        if (ret)
                return ret;

        if (tracing_disabled)
                return -ENODEV;

        if (tr && trace_array_get(tr) < 0)
                return -ENODEV;

        return 0;
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
                              struct trace_buffer *buffer,
                              struct ring_buffer_event *event)
{
        if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
            !filter_match_preds(call->filter, rec)) {
                __trace_event_discard_commit(buffer, event);
                return 1;
        }

        return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
        vfree(pid_list->pids);
        kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
        /*
         * If pid_max changed after filtered_pids was created, we
         * by default ignore all pids greater than the previous pid_max.
         */
        if (search_pid >= filtered_pids->pid_max)
                return false;

        return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids not to be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
                       struct trace_pid_list *filtered_no_pids,
                       struct task_struct *task)
{
        /*
         * If filtered_no_pids is not empty, and the task's pid is listed
         * in filtered_no_pids, then return true.
         * Otherwise, if filtered_pids is empty, that means we can
         * trace all tasks. If it has content, then only trace pids
         * within filtered_pids.
         */

        return (filtered_pids &&
                !trace_find_filtered_pid(filtered_pids, task->pid)) ||
                (filtered_no_pids &&
                trace_find_filtered_pid(filtered_no_pids, task->pid));
}
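
/*
 * Worked example of the expression above: with filtered_pids = {42}
 * and filtered_no_pids = NULL, pid 42 returns false (traced) and any
 * other pid returns true (ignored). With filtered_pids = NULL and
 * filtered_no_pids = {42}, only pid 42 returns true (ignored).
 */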

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
                                  struct task_struct *self,
                                  struct task_struct *task)
{
        if (!pid_list)
                return;

        /* For forks, we only add if the forking task is listed */
        if (self) {
                if (!trace_find_filtered_pid(pid_list, self->pid))
                        return;
        }

        /* Sorry, but we don't support pid_max changing after setting */
        if (task->pid >= pid_list->pid_max)
                return;

        /* "self" is set for forks, and NULL for exits */
        if (self)
                set_bit(task->pid, pid_list->pids);
        else
                clear_bit(task->pid, pid_list->pids);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
        unsigned long pid = (unsigned long)v;

        (*pos)++;

        /* pid already is +1 of the actual previous bit */
        pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

        /* Return pid + 1 to allow zero to be represented */
        if (pid < pid_list->pid_max)
                return (void *)(pid + 1);

        return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
        unsigned long pid;
        loff_t l = 0;

        pid = find_first_bit(pid_list->pids, pid_list->pid_max);
        if (pid >= pid_list->pid_max)
                return NULL;

        /* Return pid + 1 so that zero can be the exit value */
        for (pid++; pid && l < *pos;
             pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
                ;
        return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
        unsigned long pid = (unsigned long)v - 1;

        seq_printf(m, "%lu\n", pid);
        return 0;
}
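
/*
 * Sketch of how the three helpers above wire into a seq_file iterator
 * (the p_* wrappers and my_pid_list are hypothetical; the real users
 * live in the event and function tracer pid-filtering code):
 *
 *      static void *p_start(struct seq_file *m, loff_t *pos)
 *      {
 *              return trace_pid_start(my_pid_list, pos);
 *      }
 *
 *      static void *p_next(struct seq_file *m, void *v, loff_t *pos)
 *      {
 *              return trace_pid_next(my_pid_list, v, pos);
 *      }
 *
 *      static const struct seq_operations my_pid_seq_ops = {
 *              .start  = p_start,
 *              .next   = p_next,
 *              .stop   = p_stop,
 *              .show   = trace_pid_show,
 *      };
 */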

/* 128 should be much more than enough */
#define PID_BUF_SIZE            127

int trace_pid_write(struct trace_pid_list *filtered_pids,
                    struct trace_pid_list **new_pid_list,
                    const char __user *ubuf, size_t cnt)
{
        struct trace_pid_list *pid_list;
        struct trace_parser parser;
        unsigned long val;
        int nr_pids = 0;
        ssize_t read = 0;
        ssize_t ret = 0;
        loff_t pos;
        pid_t pid;

        if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
                return -ENOMEM;

        /*
         * Always recreate a new array. The write is an all or nothing
         * operation. Always create a new array when adding new pids by
         * the user. If the operation fails, then the current list is
         * not modified.
         */
        pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
        if (!pid_list) {
                trace_parser_put(&parser);
                return -ENOMEM;
        }

        pid_list->pid_max = READ_ONCE(pid_max);

        /* Only truncating will shrink pid_max */
        if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
                pid_list->pid_max = filtered_pids->pid_max;

        pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
        if (!pid_list->pids) {
                trace_parser_put(&parser);
                kfree(pid_list);
                return -ENOMEM;
        }

        if (filtered_pids) {
                /* copy the current bits to the new max */
                for_each_set_bit(pid, filtered_pids->pids,
                                 filtered_pids->pid_max) {
                        set_bit(pid, pid_list->pids);
                        nr_pids++;
                }
        }

        while (cnt > 0) {

                pos = 0;

                ret = trace_get_user(&parser, ubuf, cnt, &pos);
                if (ret < 0 || !trace_parser_loaded(&parser))
                        break;

                read += ret;
                ubuf += ret;
                cnt -= ret;

                ret = -EINVAL;
                if (kstrtoul(parser.buffer, 0, &val))
                        break;
                if (val >= pid_list->pid_max)
                        break;

                pid = (pid_t)val;

                set_bit(pid, pid_list->pids);
                nr_pids++;

                trace_parser_clear(&parser);
                ret = 0;
        }
        trace_parser_put(&parser);

        if (ret < 0) {
                trace_free_pid_list(pid_list);
                return ret;
        }

        if (!nr_pids) {
                /* Cleared the list of pids */
                trace_free_pid_list(pid_list);
                read = ret;
                pid_list = NULL;
        }

        *new_pid_list = pid_list;

        return read;
}
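
/*
 * User-space view of the all-or-nothing semantics above (a sketch,
 * using one of the tracefs files whose write handler ends up here):
 *
 *      # echo 123 456 > set_event_pid          replaces the list with {123, 456}
 *      # echo > set_event_pid                  clears the list (nr_pids == 0)
 */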

static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
        u64 ts;

        /* Early boot up does not have a buffer yet */
        if (!buf->buffer)
                return trace_clock_local();

        ts = ring_buffer_time_stamp(buf->buffer);
        ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

        return ts;
}

u64 ftrace_now(int cpu)
{
        return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
        /*
         * For quick access (irqsoff uses this in fast path), just
         * return the mirror variable of the state of the ring buffer.
         * It's a little racy, but we don't really care.
         */
        smp_rmb();
        return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long           trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer           *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                /* gain it for accessing the whole ring buffer. */
                down_write(&all_cpu_access_lock);
        } else {
                /* gain it for accessing a cpu ring buffer. */

                /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
                down_read(&all_cpu_access_lock);

                /* Secondly block other access to this @cpu ring buffer. */
                mutex_lock(&per_cpu(cpu_access_lock, cpu));
        }
}

static inline void trace_access_unlock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                up_write(&all_cpu_access_lock);
        } else {
                mutex_unlock(&per_cpu(cpu_access_lock, cpu));
                up_read(&all_cpu_access_lock);
        }
}

static inline void trace_access_lock_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
        (void)cpu;
        mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
        (void)cpu;
        mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
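
/*
 * Typical usage pattern of the primitives above (sketch):
 *
 *      trace_access_lock(cpu);         cpu number or RING_BUFFER_ALL_CPUS
 *      ... consume events of that cpu buffer (or all buffers) ...
 *      trace_access_unlock(cpu);
 *
 * Readers of different cpu buffers may run concurrently; access to the
 * same cpu buffer, or by a RING_BUFFER_ALL_CPUS holder, is serialized.
 */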

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
                                 unsigned int trace_ctx,
                                 int skip, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct trace_buffer *buffer,
                                      unsigned int trace_ctx,
                                      int skip, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
                                        unsigned int trace_ctx,
                                        int skip, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct trace_buffer *buffer,
                                      unsigned int trace_ctx,
                                      int skip, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
                  int type, unsigned int trace_ctx)
{
        struct trace_entry *ent = ring_buffer_event_data(event);

        tracing_generic_entry_update(ent, type, trace_ctx);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
                          int type,
                          unsigned long len,
                          unsigned int trace_ctx)
{
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(buffer, len);
        if (event != NULL)
                trace_event_setup(event, type, trace_ctx);

        return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
        if (tr->array_buffer.buffer)
                ring_buffer_record_on(tr->array_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 0;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
        tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
        __this_cpu_write(trace_taskinfo_save, true);

        /* If this is the temp buffer, we need to commit fully */
        if (this_cpu_read(trace_buffered_event) == event) {
                /* Length is in event->array[0] */
                ring_buffer_write(buffer, event->array[0], &event->array[1]);
                /* Release the temp buffer */
                this_cpu_dec(trace_buffered_event_cnt);
        } else
                ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:    The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
        struct ring_buffer_event *event;
        struct trace_buffer *buffer;
        struct print_entry *entry;
        unsigned int trace_ctx;
        int alloc;

        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
                return 0;

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        alloc = sizeof(*entry) + size + 2; /* possible \n added */

        trace_ctx = tracing_gen_ctx();
        buffer = global_trace.array_buffer.buffer;
        ring_buffer_nest_start(buffer);
        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
                                            trace_ctx);
        if (!event) {
                size = 0;
                goto out;
        }

        entry = ring_buffer_event_data(event);
        entry->ip = ip;

        memcpy(&entry->buf, str, size);

        /* Add a newline if necessary */
        if (entry->buf[size - 1] != '\n') {
                entry->buf[size] = '\n';
                entry->buf[size + 1] = '\0';
        } else
                entry->buf[size] = '\0';

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
 out:
        ring_buffer_nest_end(buffer);
        return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:    The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
        struct ring_buffer_event *event;
        struct trace_buffer *buffer;
        struct bputs_entry *entry;
        unsigned int trace_ctx;
        int size = sizeof(struct bputs_entry);
        int ret = 0;

        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
                return 0;

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        trace_ctx = tracing_gen_ctx();
        buffer = global_trace.array_buffer.buffer;

        ring_buffer_nest_start(buffer);
        event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
                                            trace_ctx);
        if (!event)
                goto out;

        entry = ring_buffer_event_data(event);
        entry->ip = ip;
        entry->str = str;

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);

        ret = 1;
 out:
        ring_buffer_nest_end(buffer);
        return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
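
/*
 * Note: kernel code normally reaches __trace_puts()/__trace_bputs()
 * through the trace_puts() macro, which selects the bputs variant for
 * build-time constant strings (recording only the pointer) and falls
 * back to __trace_puts() otherwise, e.g.:
 *
 *      trace_puts("reached the fast path\n");
 */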

#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
                                           void *cond_data)
{
        struct tracer *tracer = tr->current_trace;
        unsigned long flags;

        if (in_nmi()) {
                internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
                internal_trace_puts("*** snapshot is being ignored        ***\n");
                return;
        }

        if (!tr->allocated_snapshot) {
                internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
                internal_trace_puts("*** stopping trace here!   ***\n");
                tracing_off();
                return;
        }

        /* Note, snapshot can not be used when the tracer uses it */
        if (tracer->use_max_tr) {
                internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
                internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
                return;
        }

        local_irq_save(flags);
        update_max_tr(tr, current, smp_processor_id(), cond_data);
        local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
        tracing_snapshot_instance_cond(tr, NULL);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
        struct trace_array *tr = &global_trace;

        tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
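
/*
 * Example usage (sketch; hit_the_bug() is hypothetical): allocate the
 * snapshot buffer once from a context that may sleep, then snapshot
 * when the interesting condition fires; tracing_snapshot() itself does
 * not allocate:
 *
 *      tracing_snapshot_alloc();
 *      ...
 *      if (hit_the_bug())
 *              tracing_snapshot();
 */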

/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr:         The tracing instance to snapshot
 * @cond_data:  The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
        tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr:         The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
        void *cond_data = NULL;

        arch_spin_lock(&tr->max_lock);

        if (tr->cond_snapshot)
                cond_data = tr->cond_snapshot->cond_data;

        arch_spin_unlock(&tr->max_lock);

        return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);

static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
                                        struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
        int ret;

        if (!tr->allocated_snapshot) {

                /* allocate spare buffer */
                ret = resize_buffer_duplicate_size(&tr->max_buffer,
                                   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
                if (ret < 0)
                        return ret;

                tr->allocated_snapshot = true;
        }

        return 0;
}

static void free_snapshot(struct trace_array *tr)
{
        /*
         * We don't free the ring buffer, instead we resize it because
         * the max_tr ring buffer has some state (e.g. ring->clock) and
         * we want to preserve it.
         */
        ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
        set_buffer_entries(&tr->max_buffer, 1);
        tracing_reset_online_cpus(&tr->max_buffer);
        tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
        struct trace_array *tr = &global_trace;
        int ret;

        ret = tracing_alloc_snapshot_instance(tr);
        WARN_ON(ret < 0);

        return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
        int ret;

        ret = tracing_alloc_snapshot();
        if (ret < 0)
                return;

        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Tom Zanussia35873a2019-02-13 17:42:45 -06001283
1284/**
1285 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1286 * @tr: The tracing instance
1287 * @cond_data: User data to associate with the snapshot
1288 * @update: Implementation of the cond_snapshot update function
1289 *
1290 * Check whether the conditional snapshot for the given instance has
1291 * already been enabled, or if the current tracer is already using a
1292 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1293 * save the cond_data and update function inside.
1294 *
1295 * Returns 0 if successful, error otherwise.
1296 */
1297int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1298 cond_update_fn_t update)
1299{
1300 struct cond_snapshot *cond_snapshot;
1301 int ret = 0;
1302
1303 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1304 if (!cond_snapshot)
1305 return -ENOMEM;
1306
1307 cond_snapshot->cond_data = cond_data;
1308 cond_snapshot->update = update;
1309
1310 mutex_lock(&trace_types_lock);
1311
1312 ret = tracing_alloc_snapshot_instance(tr);
1313 if (ret)
1314 goto fail_unlock;
1315
1316 if (tr->current_trace->use_max_tr) {
1317 ret = -EBUSY;
1318 goto fail_unlock;
1319 }
1320
Steven Rostedt (VMware)1c347a92019-02-14 18:45:21 -05001321 /*
1322 * The cond_snapshot can only change to NULL without the
1323 * trace_types_lock. We don't care if we race with it going
1324 * to NULL, but we want to make sure that it's not set to
1325 * something other than NULL when we get here, which we can
1326 * do safely with only holding the trace_types_lock and not
1327 * having to take the max_lock.
1328 */
Tom Zanussia35873a2019-02-13 17:42:45 -06001329 if (tr->cond_snapshot) {
1330 ret = -EBUSY;
1331 goto fail_unlock;
1332 }
1333
1334 arch_spin_lock(&tr->max_lock);
1335 tr->cond_snapshot = cond_snapshot;
1336 arch_spin_unlock(&tr->max_lock);
1337
1338 mutex_unlock(&trace_types_lock);
1339
1340 return ret;
1341
1342 fail_unlock:
1343 mutex_unlock(&trace_types_lock);
1344 kfree(cond_snapshot);
1345 return ret;
1346}
1347EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
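/*
 * Illustrative sketch (not part of the original file) of the
 * conditional snapshot API above. The update callback is invoked under
 * tr->max_lock and decides whether a tracing_snapshot_cond() call
 * actually swaps the buffers. All "example_*" names are hypothetical.
 */
#if 0	/* example only */
struct example_cond_data {
	u64 threshold;
	u64 last_value;
};

static bool example_update(struct trace_array *tr, void *cond_data)
{
	struct example_cond_data *data = cond_data;

	/* Swap buffers only when the recorded value crosses the threshold */
	return data->last_value > data->threshold;
}

static int example_setup(struct trace_array *tr, struct example_cond_data *data)
{
	/* Allocates the snapshot buffer and saves data + callback */
	return tracing_snapshot_cond_enable(tr, data, example_update);
}

static void example_hit(struct trace_array *tr,
			struct example_cond_data *data, u64 value)
{
	data->last_value = value;
	/* Takes the snapshot only if example_update() returns true */
	tracing_snapshot_cond(tr, data);
}

static void example_teardown(struct trace_array *tr)
{
	/* Frees the cond_snapshot; pairs with example_setup() */
	tracing_snapshot_cond_disable(tr);
}
#endif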
1348
1349/**
1350 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1351 * @tr: The tracing instance
1352 *
1353 * Check whether the conditional snapshot for the given instance is
1354 * enabled; if so, free the cond_snapshot associated with it,
1355 * otherwise return -EINVAL.
1356 *
1357 * Returns 0 if successful, error otherwise.
1358 */
1359int tracing_snapshot_cond_disable(struct trace_array *tr)
1360{
1361 int ret = 0;
1362
1363 arch_spin_lock(&tr->max_lock);
1364
1365 if (!tr->cond_snapshot)
1366 ret = -EINVAL;
1367 else {
1368 kfree(tr->cond_snapshot);
1369 tr->cond_snapshot = NULL;
1370 }
1371
1372 arch_spin_unlock(&tr->max_lock);
1373
1374 return ret;
1375}
1376EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001377#else
1378void tracing_snapshot(void)
1379{
1380 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1381}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001382EXPORT_SYMBOL_GPL(tracing_snapshot);
Tom Zanussia35873a2019-02-13 17:42:45 -06001383void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1384{
1385 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1386}
1387EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
Tom Zanussi93e31ff2013-10-24 08:59:26 -05001388int tracing_alloc_snapshot(void)
1389{
1390 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1391 return -ENODEV;
1392}
1393EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001394void tracing_snapshot_alloc(void)
1395{
1396 /* Give warning */
1397 tracing_snapshot();
1398}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001399EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Tom Zanussia35873a2019-02-13 17:42:45 -06001400void *tracing_cond_snapshot_data(struct trace_array *tr)
1401{
1402 return NULL;
1403}
1404EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1405int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1406{
1407 return -ENODEV;
1408}
1409EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1410int tracing_snapshot_cond_disable(struct trace_array *tr)
1411{
1412	return -ENODEV;
1413}
1414EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001415#endif /* CONFIG_TRACER_SNAPSHOT */
1416
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -04001417void tracer_tracing_off(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001418{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001419 if (tr->array_buffer.buffer)
1420 ring_buffer_record_off(tr->array_buffer.buffer);
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001421 /*
1422 * This flag is looked at when buffers haven't been allocated
1423	 * yet, or by some tracers (like irqsoff) that just want to
1424	 * know if the ring buffer has been disabled, but can handle
1425	 * races where it gets disabled while we still do a record.
1426 * As the check is in the fast path of the tracers, it is more
1427 * important to be fast than accurate.
1428 */
1429 tr->buffer_disabled = 1;
1430 /* Make the flag seen by readers */
1431 smp_wmb();
1432}
1433
Steven Rostedt499e5472012-02-22 15:50:28 -05001434/**
1435 * tracing_off - turn off tracing buffers
1436 *
1437 * This function stops the tracing buffers from recording data.
1438 * It does not disable any overhead the tracers themselves may
1439 * be causing. This function simply causes all recording to
1440 * the ring buffers to fail.
1441 */
1442void tracing_off(void)
1443{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001444 tracer_tracing_off(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001445}
1446EXPORT_SYMBOL_GPL(tracing_off);
1447
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001448void disable_trace_on_warning(void)
1449{
Steven Rostedt (VMware)c2007842020-05-29 10:46:32 -04001450 if (__disable_trace_on_warning) {
1451 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1452 "Disabling tracing due to warning\n");
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001453 tracing_off();
Steven Rostedt (VMware)c2007842020-05-29 10:46:32 -04001454 }
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001455}
1456
Steven Rostedt499e5472012-02-22 15:50:28 -05001457/**
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001458 * tracer_tracing_is_on - show the real state of the ring buffer
1459 * @tr: the trace array to check
1460 *
1461 * Shows the real state of the ring buffer: whether it is actually recording or not.
1462 */
Steven Rostedt (VMware)ec573502018-08-01 16:08:57 -04001463bool tracer_tracing_is_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001464{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001465 if (tr->array_buffer.buffer)
1466 return ring_buffer_record_is_on(tr->array_buffer.buffer);
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001467 return !tr->buffer_disabled;
1468}
1469
Steven Rostedt499e5472012-02-22 15:50:28 -05001470/**
1471 * tracing_is_on - show state of ring buffers enabled
1472 */
1473int tracing_is_on(void)
1474{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001475 return tracer_tracing_is_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001476}
1477EXPORT_SYMBOL_GPL(tracing_is_on);
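/*
 * Illustrative sketch (not part of the original file): a common use of
 * tracing_off() is to freeze the ring buffers the moment a problem is
 * detected, preserving the trace that led up to it. example_check() is
 * hypothetical.
 */
#if 0	/* example only */
static void example_check(int status)
{
	if (status < 0 && tracing_is_on())
		tracing_off();	/* stop recording; buffers stay readable */
}
#endif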
1478
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001479static int __init set_buf_size(char *str)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001480{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001481 unsigned long buf_size;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001482
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001483 if (!str)
1484 return 0;
Li Zefan9d612be2009-06-24 17:33:15 +08001485 buf_size = memparse(str, &str);
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001486 /* nr_entries can not be zero */
Li Zefan9d612be2009-06-24 17:33:15 +08001487 if (buf_size == 0)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001488 return 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001489 trace_buf_size = buf_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001490 return 1;
1491}
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001492__setup("trace_buf_size=", set_buf_size);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001493
Tim Bird0e950172010-02-25 15:36:43 -08001494static int __init set_tracing_thresh(char *str)
1495{
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001496 unsigned long threshold;
Tim Bird0e950172010-02-25 15:36:43 -08001497 int ret;
1498
1499 if (!str)
1500 return 0;
Daniel Walterbcd83ea2012-09-26 22:08:38 +02001501 ret = kstrtoul(str, 0, &threshold);
Tim Bird0e950172010-02-25 15:36:43 -08001502 if (ret < 0)
1503 return 0;
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001504 tracing_thresh = threshold * 1000;
Tim Bird0e950172010-02-25 15:36:43 -08001505 return 1;
1506}
1507__setup("tracing_thresh=", set_tracing_thresh);
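/*
 * Illustrative usage (not part of the original file) of the two boot
 * parameters handled above. memparse() accepts K/M/G suffixes, and the
 * threshold is given in microseconds (stored internally in nanoseconds):
 *
 *	trace_buf_size=4096k	size of each per-CPU ring buffer
 *	tracing_thresh=100	tracing_thresh becomes 100000 ns
 */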
1508
Steven Rostedt57f50be2008-05-12 21:20:44 +02001509unsigned long nsecs_to_usecs(unsigned long nsecs)
1510{
1511 return nsecs / 1000;
1512}
1513
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001514/*
1515 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001516 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001517 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001518 * of strings in the order that the evals (enum) were defined.
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001519 */
1520#undef C
1521#define C(a, b) b
1522
Ingo Molnarf2cc0202021-03-23 18:49:35 +01001523/* These must match the bit positions in trace_iterator_flags */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001524static const char *trace_options[] = {
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001525 TRACE_FLAGS
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001526 NULL
1527};
1528
Zhaolei5079f322009-08-25 16:12:56 +08001529static struct {
1530 u64 (*func)(void);
1531 const char *name;
David Sharp8be07092012-11-13 12:18:22 -08001532 int in_ns; /* is this clock in nanoseconds? */
Zhaolei5079f322009-08-25 16:12:56 +08001533} trace_clocks[] = {
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001534 { trace_clock_local, "local", 1 },
1535 { trace_clock_global, "global", 1 },
1536 { trace_clock_counter, "counter", 0 },
Linus Torvaldse7fda6c2014-08-05 17:46:42 -07001537 { trace_clock_jiffies, "uptime", 0 },
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001538 { trace_clock, "perf", 1 },
1539 { ktime_get_mono_fast_ns, "mono", 1 },
Drew Richardsonaabfa5f2015-05-08 07:30:39 -07001540 { ktime_get_raw_fast_ns, "mono_raw", 1 },
Thomas Gleixnera3ed0e432018-04-25 15:33:38 +02001541 { ktime_get_boot_fast_ns, "boot", 1 },
David Sharp8cbd9cc2012-11-13 12:18:21 -08001542 ARCH_TRACE_CLOCKS
Zhaolei5079f322009-08-25 16:12:56 +08001543};
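/*
 * Illustrative usage (not part of the original file): the clocks in the
 * table above can be selected at run time through the trace_clock file,
 * which shows the current clock in brackets:
 *
 *	# cat /sys/kernel/tracing/trace_clock
 *	[local] global counter uptime perf mono mono_raw boot
 *	# echo mono > /sys/kernel/tracing/trace_clock
 */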
1544
Tom Zanussi860f9f62018-01-15 20:51:48 -06001545bool trace_clock_in_ns(struct trace_array *tr)
1546{
1547 if (trace_clocks[tr->clock_id].in_ns)
1548 return true;
1549
1550 return false;
1551}
1552
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001553/*
1554 * trace_parser_get_init - allocate the buffer for the trace parser
1555 */
1556int trace_parser_get_init(struct trace_parser *parser, int size)
1557{
1558 memset(parser, 0, sizeof(*parser));
1559
1560 parser->buffer = kmalloc(size, GFP_KERNEL);
1561 if (!parser->buffer)
1562 return 1;
1563
1564 parser->size = size;
1565 return 0;
1566}
1567
1568/*
1569 * trace_parser_put - free the buffer for the trace parser
1570 */
1571void trace_parser_put(struct trace_parser *parser)
1572{
1573 kfree(parser->buffer);
Steven Rostedt (VMware)0e684b62017-02-02 17:58:18 -05001574 parser->buffer = NULL;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001575}
1576
1577/*
1578 * trace_get_user - reads the user input string separated by space
1579 * (matched by isspace(ch))
1580 *
1581 * For each string found the 'struct trace_parser' is updated,
1582 * and the function returns.
1583 *
1584 * Returns number of bytes read.
1585 *
1586 * See kernel/trace/trace.h for 'struct trace_parser' details.
1587 */
1588int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1589 size_t cnt, loff_t *ppos)
1590{
1591 char ch;
1592 size_t read = 0;
1593 ssize_t ret;
1594
1595 if (!*ppos)
1596 trace_parser_clear(parser);
1597
1598 ret = get_user(ch, ubuf++);
1599 if (ret)
1600 goto out;
1601
1602 read++;
1603 cnt--;
1604
1605 /*
1606	 * If the parser is not finished with the last write,
1607	 * continue reading the user input without skipping spaces.
1608 */
1609 if (!parser->cont) {
1610 /* skip white space */
1611 while (cnt && isspace(ch)) {
1612 ret = get_user(ch, ubuf++);
1613 if (ret)
1614 goto out;
1615 read++;
1616 cnt--;
1617 }
1618
Changbin Du76638d92018-01-16 17:02:29 +08001619 parser->idx = 0;
1620
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001621 /* only spaces were written */
Changbin Du921a7ac2018-01-16 17:02:28 +08001622 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001623 *ppos += read;
1624 ret = read;
1625 goto out;
1626 }
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001627 }
1628
1629 /* read the non-space input */
Changbin Du921a7ac2018-01-16 17:02:28 +08001630 while (cnt && !isspace(ch) && ch) {
Li Zefan3c235a32009-09-22 13:51:54 +08001631 if (parser->idx < parser->size - 1)
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001632 parser->buffer[parser->idx++] = ch;
1633 else {
1634 ret = -EINVAL;
1635 goto out;
1636 }
1637 ret = get_user(ch, ubuf++);
1638 if (ret)
1639 goto out;
1640 read++;
1641 cnt--;
1642 }
1643
1644 /* We either got finished input or we have to wait for another call. */
Changbin Du921a7ac2018-01-16 17:02:28 +08001645 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001646 parser->buffer[parser->idx] = 0;
1647 parser->cont = false;
Steven Rostedt057db842013-10-09 22:23:23 -04001648 } else if (parser->idx < parser->size - 1) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001649 parser->cont = true;
1650 parser->buffer[parser->idx++] = ch;
Changbin Duf4d07062018-01-16 17:02:30 +08001651 /* Make sure the parsed string always terminates with '\0'. */
1652 parser->buffer[parser->idx] = 0;
Steven Rostedt057db842013-10-09 22:23:23 -04001653 } else {
1654 ret = -EINVAL;
1655 goto out;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001656 }
1657
1658 *ppos += read;
1659 ret = read;
1660
1661out:
1662 return ret;
1663}
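/*
 * Illustrative sketch (not part of the original file): the typical
 * calling pattern for the parser above, as seen in write handlers that
 * accept space-separated tokens. example_handle_token() is hypothetical.
 */
#if 0	/* example only */
static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read;

	if (trace_parser_get_init(&parser, 64))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);
	if (read >= 0 && trace_parser_loaded(&parser))
		example_handle_token(parser.buffer);

	trace_parser_put(&parser);
	return read;
}
#endif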
1664
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001665/* TODO add a seq_buf_to_buffer() */
Dmitri Vorobievb8b94262009-03-22 19:11:11 +02001666static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001667{
1668 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001669
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001670 if (trace_seq_used(s) <= s->seq.readpos)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001671 return -EBUSY;
1672
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001673 len = trace_seq_used(s) - s->seq.readpos;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001674 if (cnt > len)
1675 cnt = len;
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001676 memcpy(buf, s->buffer + s->seq.readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001677
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001678 s->seq.readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001679 return cnt;
1680}
1681
Tim Bird0e950172010-02-25 15:36:43 -08001682unsigned long __read_mostly tracing_thresh;
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001683static const struct file_operations tracing_max_lat_fops;
1684
1685#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1686 defined(CONFIG_FSNOTIFY)
1687
1688static struct workqueue_struct *fsnotify_wq;
1689
1690static void latency_fsnotify_workfn(struct work_struct *work)
1691{
1692 struct trace_array *tr = container_of(work, struct trace_array,
1693 fsnotify_work);
Amir Goldstein82ace1e2020-07-22 15:58:44 +03001694 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001695}
1696
1697static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1698{
1699 struct trace_array *tr = container_of(iwork, struct trace_array,
1700 fsnotify_irqwork);
1701 queue_work(fsnotify_wq, &tr->fsnotify_work);
1702}
1703
1704static void trace_create_maxlat_file(struct trace_array *tr,
1705 struct dentry *d_tracer)
1706{
1707 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1708 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1709 tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
1710 d_tracer, &tr->max_latency,
1711 &tracing_max_lat_fops);
1712}
1713
1714__init static int latency_fsnotify_init(void)
1715{
1716 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1717 WQ_UNBOUND | WQ_HIGHPRI, 0);
1718 if (!fsnotify_wq) {
1719 pr_err("Unable to allocate tr_max_lat_wq\n");
1720 return -ENOMEM;
1721 }
1722 return 0;
1723}
1724
1725late_initcall_sync(latency_fsnotify_init);
1726
1727void latency_fsnotify(struct trace_array *tr)
1728{
1729 if (!fsnotify_wq)
1730 return;
1731 /*
1732 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1733 * possible that we are called from __schedule() or do_idle(), which
1734 * could cause a deadlock.
1735 */
1736 irq_work_queue(&tr->fsnotify_irqwork);
1737}
1738
1739/*
1740 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1741 * defined(CONFIG_FSNOTIFY)
1742 */
1743#else
1744
1745#define trace_create_maxlat_file(tr, d_tracer) \
1746 trace_create_file("tracing_max_latency", 0644, d_tracer, \
1747 &tr->max_latency, &tracing_max_lat_fops)
1748
1749#endif
Tim Bird0e950172010-02-25 15:36:43 -08001750
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001751#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001752/*
1753 * Copy the new maximum trace into the separate maximum-trace
1754 * structure. (this way the maximum trace is permanently saved,
Chunyu Hu5a93bae2017-10-19 14:32:33 +08001755 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001756 */
1757static void
1758__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1759{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001760 struct array_buffer *trace_buf = &tr->array_buffer;
1761 struct array_buffer *max_buf = &tr->max_buffer;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001762 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1763 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001764
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001765 max_buf->cpu = cpu;
1766 max_buf->time_start = data->preempt_timestamp;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001767
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05001768 max_data->saved_latency = tr->max_latency;
Steven Rostedt8248ac02009-09-02 12:27:41 -04001769 max_data->critical_start = data->critical_start;
1770 max_data->critical_end = data->critical_end;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001771
Tom Zanussi85f726a2019-03-05 10:12:00 -06001772 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
Steven Rostedt8248ac02009-09-02 12:27:41 -04001773 max_data->pid = tsk->pid;
Steven Rostedt (Red Hat)f17a5192013-05-30 21:10:37 -04001774 /*
1775 * If tsk == current, then use current_uid(), as that does not use
1776 * RCU. The irq tracer can be called out of RCU scope.
1777 */
1778 if (tsk == current)
1779 max_data->uid = current_uid();
1780 else
1781 max_data->uid = task_uid(tsk);
1782
Steven Rostedt8248ac02009-09-02 12:27:41 -04001783 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1784 max_data->policy = tsk->policy;
1785 max_data->rt_priority = tsk->rt_priority;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001786
1787 /* record this tasks comm */
1788 tracing_record_cmdline(tsk);
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001789 latency_fsnotify(tr);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001790}
1791
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001792/**
1793 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1794 * @tr: tracer
1795 * @tsk: the task with the latency
1796 * @cpu: The cpu that initiated the trace.
Tom Zanussia35873a2019-02-13 17:42:45 -06001797 * @cond_data: User data associated with a conditional snapshot
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001798 *
1799 * Flip the buffers between the @tr and the max_tr and record information
1800 * about which task was the cause of this latency.
1801 */
Ingo Molnare309b412008-05-12 21:20:51 +02001802void
Tom Zanussia35873a2019-02-13 17:42:45 -06001803update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1804 void *cond_data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001805{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001806 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001807 return;
1808
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001809 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt34600f02013-01-22 13:35:11 -05001810
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05001811 if (!tr->allocated_snapshot) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001812 /* Only the nop tracer should hit this when disabling */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001813 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001814 return;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001815 }
Steven Rostedt34600f02013-01-22 13:35:11 -05001816
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001817 arch_spin_lock(&tr->max_lock);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001818
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001819 /* Inherit the recordable setting from array_buffer */
1820 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
Masami Hiramatsu73c8d892018-07-14 01:28:15 +09001821 ring_buffer_record_on(tr->max_buffer.buffer);
1822 else
1823 ring_buffer_record_off(tr->max_buffer.buffer);
1824
Tom Zanussia35873a2019-02-13 17:42:45 -06001825#ifdef CONFIG_TRACER_SNAPSHOT
1826 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1827 goto out_unlock;
1828#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001829 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001830
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001831 __update_max_tr(tr, tsk, cpu);
Tom Zanussia35873a2019-02-13 17:42:45 -06001832
1833 out_unlock:
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001834 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001835}
1836
1837/**
1838 * update_max_tr_single - only copy one trace over, and reset the rest
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07001839 * @tr: tracer
1840 * @tsk: task with the latency
1841 * @cpu: the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001842 *
1843 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001844 */
Ingo Molnare309b412008-05-12 21:20:51 +02001845void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001846update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1847{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001848 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001849
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001850 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001851 return;
1852
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001853 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001854 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001855 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001856 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001857 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001858 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001859
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001860 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001861
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001862 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001863
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001864 if (ret == -EBUSY) {
1865 /*
1866 * We failed to swap the buffer due to a commit taking
1867 * place on this CPU. We fail to record, but we reset
1868 * the max trace buffer (no one writes directly to it)
1869 * and flag that it failed.
1870 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001871 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001872 "Failed to swap buffers due to commit in progress\n");
1873 }
1874
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001875 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001876
1877 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001878 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001879}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001880#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001881
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05001882static int wait_on_pipe(struct trace_iterator *iter, int full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001883{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001884 /* Iterators are static, they should be filled or empty */
1885 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001886 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001887
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001888 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
Rabin Vincente30f53a2014-11-10 19:46:34 +01001889 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001890}
1891
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001892#ifdef CONFIG_FTRACE_STARTUP_TEST
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001893static bool selftests_can_run;
1894
1895struct trace_selftests {
1896 struct list_head list;
1897 struct tracer *type;
1898};
1899
1900static LIST_HEAD(postponed_selftests);
1901
1902static int save_selftest(struct tracer *type)
1903{
1904 struct trace_selftests *selftest;
1905
1906 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1907 if (!selftest)
1908 return -ENOMEM;
1909
1910 selftest->type = type;
1911 list_add(&selftest->list, &postponed_selftests);
1912 return 0;
1913}
1914
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001915static int run_tracer_selftest(struct tracer *type)
1916{
1917 struct trace_array *tr = &global_trace;
1918 struct tracer *saved_tracer = tr->current_trace;
1919 int ret;
1920
1921 if (!type->selftest || tracing_selftest_disabled)
1922 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001923
1924 /*
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001925 * If a tracer registers early in boot up (before scheduling is
1926 * initialized and such), then do not run its selftests yet.
1927 * Instead, run it a little later in the boot process.
1928 */
1929 if (!selftests_can_run)
1930 return save_selftest(type);
1931
Steven Rostedt (VMware)ee666a12021-03-01 10:49:35 -05001932 if (!tracing_is_on()) {
1933 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
1934 type->name);
1935 return 0;
1936 }
1937
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001938 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001939 * Run a selftest on this tracer.
1940 * Here we reset the trace buffer, and set the current
1941 * tracer to be this tracer. The tracer can then run some
1942 * internal tracing to verify that everything is in order.
1943 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001944 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001945 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001946
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001947 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001948
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001949#ifdef CONFIG_TRACER_MAX_TRACE
1950 if (type->use_max_tr) {
1951 /* If we expanded the buffers, make sure the max is expanded too */
1952 if (ring_buffer_expanded)
1953 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1954 RING_BUFFER_ALL_CPUS);
1955 tr->allocated_snapshot = true;
1956 }
1957#endif
1958
1959 /* the test is responsible for initializing and enabling */
1960 pr_info("Testing tracer %s: ", type->name);
1961 ret = type->selftest(type, tr);
1962 /* the test is responsible for resetting too */
1963 tr->current_trace = saved_tracer;
1964 if (ret) {
1965 printk(KERN_CONT "FAILED!\n");
1966 /* Add the warning after printing 'FAILED' */
1967 WARN_ON(1);
1968 return -1;
1969 }
1970 /* Only reset on passing, to avoid touching corrupted buffers */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001971 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001972
1973#ifdef CONFIG_TRACER_MAX_TRACE
1974 if (type->use_max_tr) {
1975 tr->allocated_snapshot = false;
1976
1977 /* Shrink the max buffer again */
1978 if (ring_buffer_expanded)
1979 ring_buffer_resize(tr->max_buffer.buffer, 1,
1980 RING_BUFFER_ALL_CPUS);
1981 }
1982#endif
1983
1984 printk(KERN_CONT "PASSED\n");
1985 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001986}
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001987
1988static __init int init_trace_selftests(void)
1989{
1990 struct trace_selftests *p, *n;
1991 struct tracer *t, **last;
1992 int ret;
1993
1994 selftests_can_run = true;
1995
1996 mutex_lock(&trace_types_lock);
1997
1998 if (list_empty(&postponed_selftests))
1999 goto out;
2000
2001 pr_info("Running postponed tracer tests:\n");
2002
Steven Rostedt (VMware)78041c02020-02-20 15:38:01 -05002003 tracing_selftest_running = true;
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04002004 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
Anders Roxell6fc21712018-11-30 15:56:22 +01002005 /* This loop can take minutes when sanitizers are enabled, so
2006		 * let's make sure we allow RCU processing.
2007 */
2008 cond_resched();
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04002009 ret = run_tracer_selftest(p->type);
2010 /* If the test fails, then warn and remove from available_tracers */
2011 if (ret < 0) {
2012 WARN(1, "tracer: %s failed selftest, disabling\n",
2013 p->type->name);
2014 last = &trace_types;
2015 for (t = trace_types; t; t = t->next) {
2016 if (t == p->type) {
2017 *last = t->next;
2018 break;
2019 }
2020 last = &t->next;
2021 }
2022 }
2023 list_del(&p->list);
2024 kfree(p);
2025 }
Steven Rostedt (VMware)78041c02020-02-20 15:38:01 -05002026 tracing_selftest_running = false;
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04002027
2028 out:
2029 mutex_unlock(&trace_types_lock);
2030
2031 return 0;
2032}
Steven Rostedtb9ef0322017-05-17 11:14:35 -04002033core_initcall(init_trace_selftests);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05002034#else
2035static inline int run_tracer_selftest(struct tracer *type)
2036{
2037 return 0;
2038}
2039#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002040
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04002041static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2042
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08002043static void __init apply_trace_boot_options(void);
2044
Steven Rostedt4fcdae82008-05-12 21:21:00 +02002045/**
2046 * register_tracer - register a tracer with the ftrace system.
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002047 * @type: the plugin for the tracer
Steven Rostedt4fcdae82008-05-12 21:21:00 +02002048 *
2049 * Register a new plugin tracer.
2050 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08002051int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002052{
2053 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002054 int ret = 0;
2055
2056 if (!type->name) {
2057 pr_info("Tracer must have a name\n");
2058 return -1;
2059 }
2060
Dan Carpenter24a461d2010-07-10 12:06:44 +02002061 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08002062 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2063 return -1;
2064 }
2065
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05002066 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11002067 pr_warn("Can not register tracer %s due to lockdown\n",
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05002068 type->name);
2069 return -EPERM;
2070 }
2071
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002072 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01002073
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01002074 tracing_selftest_running = true;
2075
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002076 for (t = trace_types; t; t = t->next) {
2077 if (strcmp(type->name, t->name) == 0) {
2078 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08002079 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002080 type->name);
2081 ret = -1;
2082 goto out;
2083 }
2084 }
2085
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01002086 if (!type->set_flag)
2087 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08002088 if (!type->flags) {
2089		/* allocate a dummy tracer_flags */
2090 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08002091 if (!type->flags) {
2092 ret = -ENOMEM;
2093 goto out;
2094 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08002095 type->flags->val = 0;
2096 type->flags->opts = dummy_tracer_opt;
2097 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01002098 if (!type->flags->opts)
2099 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01002100
Chunyu Hud39cdd22016-03-08 21:37:01 +08002101 /* store the tracer for __set_tracer_option */
2102 type->flags->trace = type;
2103
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05002104 ret = run_tracer_selftest(type);
2105 if (ret < 0)
2106 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02002107
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002108 type->next = trace_types;
2109 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04002110 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02002111
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002112 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01002113 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002114 mutex_unlock(&trace_types_lock);
2115
Steven Rostedtdac74942009-02-05 01:13:38 -05002116 if (ret || !default_bootup_tracer)
2117 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05002118
Li Zefanee6c2c12009-09-18 14:06:47 +08002119 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05002120 goto out_unlock;
2121
2122 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2123 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05002124 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05002125 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08002126
2127 apply_trace_boot_options();
2128
Steven Rostedtdac74942009-02-05 01:13:38 -05002129 /* disable other selftests, since this will break it. */
Masami Hiramatsu60efe212020-12-08 17:54:09 +09002130 disable_tracing_selftest("running a tracer");
Steven Rostedtdac74942009-02-05 01:13:38 -05002131
2132 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002133 return ret;
2134}
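/*
 * Illustrative sketch (not part of the original file): the minimum a
 * plugin needs in order to register itself. Real tracers also supply
 * selftest, flag and output callbacks; the "example" names are
 * hypothetical.
 */
#if 0	/* example only */
static int example_tracer_init(struct trace_array *tr)
{
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static __init int example_tracer_register(void)
{
	/* register_tracer() is __init, so this must run at boot */
	return register_tracer(&example_tracer);
}
core_initcall(example_tracer_register);
#endif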
2135
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002136static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04002137{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002138 struct trace_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04002139
Hiraku Toyookaa5416412012-12-19 16:02:34 +09002140 if (!buffer)
2141 return;
2142
Steven Rostedtf6339032009-09-04 12:35:16 -04002143 ring_buffer_record_disable(buffer);
2144
2145 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08002146 synchronize_rcu();
Steven Rostedt68179682012-05-08 20:57:53 -04002147 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04002148
2149 ring_buffer_record_enable(buffer);
2150}
2151
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002152void tracing_reset_online_cpus(struct array_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02002153{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002154 struct trace_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02002155
Hiraku Toyookaa5416412012-12-19 16:02:34 +09002156 if (!buffer)
2157 return;
2158
Steven Rostedt621968c2009-09-04 12:02:35 -04002159 ring_buffer_record_disable(buffer);
2160
2161 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08002162 synchronize_rcu();
Steven Rostedt621968c2009-09-04 12:02:35 -04002163
Alexander Z Lam94571582013-08-02 18:36:16 -07002164 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02002165
Nicholas Pigginb23d7a5f2020-06-25 15:34:03 +10002166 ring_buffer_reset_online_cpus(buffer);
Steven Rostedt621968c2009-09-04 12:02:35 -04002167
2168 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02002169}
2170
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04002171/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002172void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04002173{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002174 struct trace_array *tr;
2175
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002176 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (VMware)065e63f2017-08-31 17:03:47 -04002177 if (!tr->clear_trace)
2178 continue;
2179 tr->clear_trace = false;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002180 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002181#ifdef CONFIG_TRACER_MAX_TRACE
2182 tracing_reset_online_cpus(&tr->max_buffer);
2183#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002184 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04002185}
2186
Joel Fernandesd914ba32017-06-26 19:01:55 -07002187static int *tgid_map;
2188
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002189#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002190#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01002191static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002192struct saved_cmdlines_buffer {
2193 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2194 unsigned *map_cmdline_to_pid;
2195 unsigned cmdline_num;
2196 int cmdline_idx;
2197 char *saved_cmdlines;
2198};
2199static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02002200
Steven Rostedt25b0b442008-05-12 21:21:00 +02002201/* temporary disable recording */
Joel Fernandesd914ba32017-06-26 19:01:55 -07002202static atomic_t trace_record_taskinfo_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002203
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002204static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002205{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002206 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2207}
2208
2209static inline void set_cmdline(int idx, const char *cmdline)
2210{
Tom Zanussi85f726a2019-03-05 10:12:00 -06002211 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002212}
2213
2214static int allocate_cmdlines_buffer(unsigned int val,
2215 struct saved_cmdlines_buffer *s)
2216{
Kees Cook6da2ec52018-06-12 13:55:00 -07002217 s->map_cmdline_to_pid = kmalloc_array(val,
2218 sizeof(*s->map_cmdline_to_pid),
2219 GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002220 if (!s->map_cmdline_to_pid)
2221 return -ENOMEM;
2222
Kees Cook6da2ec52018-06-12 13:55:00 -07002223 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002224 if (!s->saved_cmdlines) {
2225 kfree(s->map_cmdline_to_pid);
2226 return -ENOMEM;
2227 }
2228
2229 s->cmdline_idx = 0;
2230 s->cmdline_num = val;
2231 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2232 sizeof(s->map_pid_to_cmdline));
2233 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2234 val * sizeof(*s->map_cmdline_to_pid));
2235
2236 return 0;
2237}
2238
2239static int trace_create_savedcmd(void)
2240{
2241 int ret;
2242
Namhyung Kima6af8fb2014-06-10 16:11:35 +09002243 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002244 if (!savedcmd)
2245 return -ENOMEM;
2246
2247 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2248 if (ret < 0) {
2249 kfree(savedcmd);
2250 savedcmd = NULL;
2251 return -ENOMEM;
2252 }
2253
2254 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002255}
2256
Carsten Emdeb5130b12009-09-13 01:43:07 +02002257int is_tracing_stopped(void)
2258{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002259 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02002260}
2261
Steven Rostedt0f048702008-11-05 16:05:44 -05002262/**
2263 * tracing_start - quick start of the tracer
2264 *
2265 * If tracing is enabled but was stopped by tracing_stop,
2266 * this will start the tracer back up.
2267 */
2268void tracing_start(void)
2269{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002270 struct trace_buffer *buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002271 unsigned long flags;
2272
2273 if (tracing_disabled)
2274 return;
2275
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002276 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2277 if (--global_trace.stop_count) {
2278 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05002279 /* Someone screwed up their debugging */
2280 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002281 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05002282 }
Steven Rostedt0f048702008-11-05 16:05:44 -05002283 goto out;
2284 }
2285
Steven Rostedta2f80712010-03-12 19:56:00 -05002286 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002287 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05002288
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002289 buffer = global_trace.array_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002290 if (buffer)
2291 ring_buffer_record_enable(buffer);
2292
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002293#ifdef CONFIG_TRACER_MAX_TRACE
2294 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002295 if (buffer)
2296 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002297#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002298
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002299 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002300
Steven Rostedt0f048702008-11-05 16:05:44 -05002301 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002302 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2303}
2304
2305static void tracing_start_tr(struct trace_array *tr)
2306{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002307 struct trace_buffer *buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002308 unsigned long flags;
2309
2310 if (tracing_disabled)
2311 return;
2312
2313 /* If global, we need to also start the max tracer */
2314 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2315 return tracing_start();
2316
2317 raw_spin_lock_irqsave(&tr->start_lock, flags);
2318
2319 if (--tr->stop_count) {
2320 if (tr->stop_count < 0) {
2321 /* Someone screwed up their debugging */
2322 WARN_ON_ONCE(1);
2323 tr->stop_count = 0;
2324 }
2325 goto out;
2326 }
2327
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002328 buffer = tr->array_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002329 if (buffer)
2330 ring_buffer_record_enable(buffer);
2331
2332 out:
2333 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002334}
2335
2336/**
2337 * tracing_stop - quick stop of the tracer
2338 *
2339 * Light weight way to stop tracing. Use in conjunction with
2340 * tracing_start.
2341 */
2342void tracing_stop(void)
2343{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002344 struct trace_buffer *buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002345 unsigned long flags;
2346
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002347 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2348 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05002349 goto out;
2350
Steven Rostedta2f80712010-03-12 19:56:00 -05002351 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002352 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002353
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002354 buffer = global_trace.array_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002355 if (buffer)
2356 ring_buffer_record_disable(buffer);
2357
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002358#ifdef CONFIG_TRACER_MAX_TRACE
2359 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002360 if (buffer)
2361 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002362#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002363
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002364 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002365
Steven Rostedt0f048702008-11-05 16:05:44 -05002366 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002367 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2368}
2369
2370static void tracing_stop_tr(struct trace_array *tr)
2371{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002372 struct trace_buffer *buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002373 unsigned long flags;
2374
2375 /* If global, we need to also stop the max tracer */
2376 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2377 return tracing_stop();
2378
2379 raw_spin_lock_irqsave(&tr->start_lock, flags);
2380 if (tr->stop_count++)
2381 goto out;
2382
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002383 buffer = tr->array_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002384 if (buffer)
2385 ring_buffer_record_disable(buffer);
2386
2387 out:
2388 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002389}
2390
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002391static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002392{
Carsten Emdea635cf02009-03-18 09:00:41 +01002393 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002394
Joel Fernandeseaf260a2017-07-06 16:00:21 -07002395 /* treat recording of idle task as a success */
2396 if (!tsk->pid)
2397 return 1;
2398
2399 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002400 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002401
2402 /*
2403 * It's not the end of the world if we don't get
2404 * the lock, but we also don't want to spin
2405 * nor do we want to disable interrupts,
2406 * so if we miss here, then better luck next time.
2407 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002408 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002409 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002410
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002411 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002412 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002413 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002414
Carsten Emdea635cf02009-03-18 09:00:41 +01002415 /*
2416 * Check whether the cmdline buffer at idx has a pid
2417 * mapped. We are going to overwrite that entry so we
2418 * need to clear the map_pid_to_cmdline. Otherwise we
2419 * would read the new comm for the old pid.
2420 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002421 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01002422 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002423 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002424
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002425 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2426 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002427
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002428 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002429 }
2430
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002431 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002432
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002433 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002434
2435 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002436}
2437
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002438static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002439{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002440 unsigned map;
2441
Steven Rostedt4ca530852009-03-16 19:20:15 -04002442 if (!pid) {
2443 strcpy(comm, "<idle>");
2444 return;
2445 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002446
Steven Rostedt74bf4072010-01-25 15:11:53 -05002447 if (WARN_ON_ONCE(pid < 0)) {
2448 strcpy(comm, "<XXX>");
2449 return;
2450 }
2451
Steven Rostedt4ca530852009-03-16 19:20:15 -04002452 if (pid > PID_MAX_DEFAULT) {
2453 strcpy(comm, "<...>");
2454 return;
2455 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002456
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002457 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01002458 if (map != NO_CMDLINE_MAP)
Amey Telawanee09e2862017-05-03 15:41:14 +05302459 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
Thomas Gleixner50d88752009-03-18 08:58:44 +01002460 else
2461 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002462}
2463
2464void trace_find_cmdline(int pid, char comm[])
2465{
2466 preempt_disable();
2467 arch_spin_lock(&trace_cmdline_lock);
2468
2469 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002470
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002471 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02002472 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002473}
2474
Joel Fernandesd914ba32017-06-26 19:01:55 -07002475int trace_find_tgid(int pid)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002476{
Joel Fernandesd914ba32017-06-26 19:01:55 -07002477 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2478 return 0;
2479
2480 return tgid_map[pid];
2481}
2482
2483static int trace_save_tgid(struct task_struct *tsk)
2484{
Joel Fernandesbd45d342017-07-06 16:00:22 -07002485 /* treat recording of idle task as a success */
2486 if (!tsk->pid)
2487 return 1;
2488
2489 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
Joel Fernandesd914ba32017-06-26 19:01:55 -07002490 return 0;
2491
2492 tgid_map[tsk->pid] = tsk->tgid;
2493 return 1;
2494}
2495
2496static bool tracing_record_taskinfo_skip(int flags)
2497{
2498 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2499 return true;
2500 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2501 return true;
2502 if (!__this_cpu_read(trace_taskinfo_save))
2503 return true;
2504 return false;
2505}
2506
2507/**
2508 * tracing_record_taskinfo - record the task info of a task
2509 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002510 * @task: task to record
2511 * @flags: TRACE_RECORD_CMDLINE for recording comm
2512 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002513 */
2514void tracing_record_taskinfo(struct task_struct *task, int flags)
2515{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002516 bool done;
2517
Joel Fernandesd914ba32017-06-26 19:01:55 -07002518 if (tracing_record_taskinfo_skip(flags))
2519 return;
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002520
2521 /*
2522 * Record as much task information as possible. If some fail, continue
2523 * to try to record the others.
2524 */
2525 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2526 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2527
2528 /* If recording any information failed, retry again soon. */
2529 if (!done)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002530 return;
2531
Joel Fernandesd914ba32017-06-26 19:01:55 -07002532 __this_cpu_write(trace_taskinfo_save, false);
2533}
2534
2535/**
2536 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2537 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002538 * @prev: previous task during sched_switch
2539 * @next: next task during sched_switch
2540 * @flags: TRACE_RECORD_CMDLINE for recording comm
2541 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002542 */
2543void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2544 struct task_struct *next, int flags)
2545{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002546 bool done;
2547
Joel Fernandesd914ba32017-06-26 19:01:55 -07002548 if (tracing_record_taskinfo_skip(flags))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002549 return;
2550
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002551 /*
2552 * Record as much task information as possible. If some fail, continue
2553 * to try to record the others.
2554 */
2555 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2556 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2557 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2558 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
Joel Fernandesd914ba32017-06-26 19:01:55 -07002559
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002560 /* If recording any information failed, retry again soon. */
2561 if (!done)
Joel Fernandesd914ba32017-06-26 19:01:55 -07002562 return;
2563
2564 __this_cpu_write(trace_taskinfo_save, false);
2565}
2566
2567/* Helpers to record a specific task information */
2568void tracing_record_cmdline(struct task_struct *task)
2569{
2570 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2571}
2572
2573void tracing_record_tgid(struct task_struct *task)
2574{
2575 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002576}
2577
Steven Rostedt (VMware)af0009f2017-03-16 11:01:06 -04002578/*
2579 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2580 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2581 * simplifies those functions and keeps them in sync.
2582 */
2583enum print_line_t trace_handle_return(struct trace_seq *s)
2584{
2585 return trace_seq_has_overflowed(s) ?
2586 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2587}
2588EXPORT_SYMBOL_GPL(trace_handle_return);
2589
Sebastian Andrzej Siewior0c020062021-01-25 20:45:09 +01002590unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002591{
Sebastian Andrzej Siewior0c020062021-01-25 20:45:09 +01002592 unsigned int trace_flags = irqs_status;
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002593 unsigned int pc;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002594
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002595 pc = preempt_count();
2596
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002597 if (pc & NMI_MASK)
2598 trace_flags |= TRACE_FLAG_NMI;
2599 if (pc & HARDIRQ_MASK)
2600 trace_flags |= TRACE_FLAG_HARDIRQ;
Sebastian Andrzej Siewiorfe427882021-01-25 20:45:10 +01002601 if (in_serving_softirq())
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002602 trace_flags |= TRACE_FLAG_SOFTIRQ;
2603
2604 if (tif_need_resched())
2605 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2606 if (test_preempt_need_resched())
2607 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2608 return (trace_flags << 16) | (pc & 0xff);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002609}
2610
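/*
 * Sketch of how a consumer could unpack the packed value (illustrative
 * only): the low byte carries the preempt count snapshot, the upper
 * 16 bits carry the TRACE_FLAG_* bits set above.
 *
 *	unsigned int ctx = tracing_gen_ctx_irq_test(0);
 *	unsigned char pc = ctx & 0xff;
 *	unsigned short flags = ctx >> 16;
 */
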
Steven Rostedte77405a2009-09-02 14:17:06 -04002611struct ring_buffer_event *
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002612trace_buffer_lock_reserve(struct trace_buffer *buffer,
Steven Rostedte77405a2009-09-02 14:17:06 -04002613 int type,
2614 unsigned long len,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002615 unsigned int trace_ctx)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002616{
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002617 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002618}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002619
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002620DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2621DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2622static int trace_buffered_event_ref;
2623
2624/**
2625 * trace_buffered_event_enable - enable buffering events
2626 *
2627 * When events are being filtered, it is quicker to write the event
2628 * data into a temporary buffer when there is a good chance the event
2629 * will be discarded. Discarding an event from the ring buffer is
2630 * slower than committing one, and much slower than copying the data
2631 * first and committing it only when it passes the filter.
2632 *
2633 * When an event is to be filtered, allocate per CPU buffers to write
2634 * the event data into. If the event is then filtered and discarded,
2635 * it is simply dropped; otherwise the entire data is committed in
2636 * one shot.
2637 */
2638void trace_buffered_event_enable(void)
2639{
2640 struct ring_buffer_event *event;
2641 struct page *page;
2642 int cpu;
2643
2644 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2645
2646 if (trace_buffered_event_ref++)
2647 return;
2648
2649 for_each_tracing_cpu(cpu) {
2650 page = alloc_pages_node(cpu_to_node(cpu),
2651 GFP_KERNEL | __GFP_NORETRY, 0);
2652 if (!page)
2653 goto failed;
2654
2655 event = page_address(page);
2656 memset(event, 0, sizeof(*event));
2657
2658 per_cpu(trace_buffered_event, cpu) = event;
2659
2660 preempt_disable();
2661 if (cpu == smp_processor_id() &&
Xianting Tianb427e762020-08-13 19:28:03 +08002662 __this_cpu_read(trace_buffered_event) !=
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002663 per_cpu(trace_buffered_event, cpu))
2664 WARN_ON_ONCE(1);
2665 preempt_enable();
2666 }
2667
2668 return;
2669 failed:
2670 trace_buffered_event_disable();
2671}
2672
2673static void enable_trace_buffered_event(void *data)
2674{
2675 /* Probably not needed, but do it anyway */
2676 smp_rmb();
2677 this_cpu_dec(trace_buffered_event_cnt);
2678}
2679
2680static void disable_trace_buffered_event(void *data)
2681{
2682 this_cpu_inc(trace_buffered_event_cnt);
2683}
2684
2685/**
2686 * trace_buffered_event_disable - disable buffering events
2687 *
2688 * When a filter is removed, it is faster to not use the buffered
2689 * events, and to commit directly into the ring buffer. Free up
2690 * the temp buffers when there are no more users. This requires
2691 * special synchronization with current events.
2692 */
2693void trace_buffered_event_disable(void)
2694{
2695 int cpu;
2696
2697 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2698
2699 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2700 return;
2701
2702 if (--trace_buffered_event_ref)
2703 return;
2704
2705 preempt_disable();
2706 /* For each CPU, set the buffer as used. */
2707 smp_call_function_many(tracing_buffer_mask,
2708 disable_trace_buffered_event, NULL, 1);
2709 preempt_enable();
2710
2711 /* Wait for all current users to finish */
Paul E. McKenney74401722018-11-06 18:44:52 -08002712 synchronize_rcu();
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002713
2714 for_each_tracing_cpu(cpu) {
2715 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2716 per_cpu(trace_buffered_event, cpu) = NULL;
2717 }
2718 /*
2719 * Make sure trace_buffered_event is NULL before clearing
2720 * trace_buffered_event_cnt.
2721 */
2722 smp_wmb();
2723
2724 preempt_disable();
2725 /* Do the work on each cpu */
2726 smp_call_function_many(tracing_buffer_mask,
2727 enable_trace_buffered_event, NULL, 1);
2728 preempt_enable();
2729}
2730
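/*
 * Rough usage sketch for the pair above (illustrative, not from this
 * file): both calls must be made under event_mutex and kept balanced.
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();
 *	...install or update an event filter...
 *	trace_buffered_event_disable();
 *	mutex_unlock(&event_mutex);
 */
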
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002731static struct trace_buffer *temp_buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002732
Steven Rostedtef5580d2009-02-27 19:38:04 -05002733struct ring_buffer_event *
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002734trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002735 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002736 int type, unsigned long len,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002737 unsigned int trace_ctx)
Steven Rostedtccb469a2012-08-02 10:32:10 -04002738{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002739 struct ring_buffer_event *entry;
Steven Rostedt (VMware)b94bc802021-03-16 12:41:05 -04002740 struct trace_array *tr = trace_file->tr;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002741 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002742
Steven Rostedt (VMware)b94bc802021-03-16 12:41:05 -04002743 *current_rb = tr->array_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002744
Steven Rostedt (VMware)b94bc802021-03-16 12:41:05 -04002745 if (!tr->no_filter_buffering_ref &&
2746 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002747 (entry = this_cpu_read(trace_buffered_event))) {
2748 /* Try to use the per cpu buffer first */
2749 val = this_cpu_inc_return(trace_buffered_event_cnt);
Steven Rostedt (VMware)b220c042021-02-10 11:53:22 -05002750 if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002751 trace_event_setup(entry, type, trace_ctx);
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002752 entry->array[0] = len;
2753 return entry;
2754 }
2755 this_cpu_dec(trace_buffered_event_cnt);
2756 }
2757
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002758 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2759 trace_ctx);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002760 /*
2761 * If tracing is off, but we have triggers enabled
2762 * we still need to look at the event data. Use the temp_buffer
Qiujun Huang906695e2020-10-31 16:57:14 +08002763	 * to store the trace event for the trigger to use. It's recursion
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002764	 * safe and will not be recorded anywhere.
2765 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002766 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002767 *current_rb = temp_buffer;
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002768 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2769 trace_ctx);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002770 }
2771 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002772}
2773EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2774
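/*
 * Rough shape of a caller (hedged sketch; real users normally reach
 * this through the generated trace_event_raw_event_*() code): reserve,
 * fill in the entry, then commit through the trigger/commit helpers.
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, trace_file, type,
 *						sizeof(*entry),
 *						tracing_gen_ctx());
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		...fill in the entry fields...
 *		...commit (or let a trigger discard) the event...
 *	}
 */
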
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002775static DEFINE_SPINLOCK(tracepoint_iter_lock);
2776static DEFINE_MUTEX(tracepoint_printk_mutex);
2777
2778static void output_printk(struct trace_event_buffer *fbuffer)
2779{
2780 struct trace_event_call *event_call;
Masami Hiramatsud8d0c242020-01-11 01:05:18 +09002781 struct trace_event_file *file;
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002782 struct trace_event *event;
2783 unsigned long flags;
2784 struct trace_iterator *iter = tracepoint_print_iter;
2785
2786	/* iter should never be NULL here, but guard against it anyway */
2787 if (WARN_ON_ONCE(!iter))
2788 return;
2789
2790 event_call = fbuffer->trace_file->event_call;
2791 if (!event_call || !event_call->event.funcs ||
2792 !event_call->event.funcs->trace)
2793 return;
2794
Masami Hiramatsud8d0c242020-01-11 01:05:18 +09002795 file = fbuffer->trace_file;
2796 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2797 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2798 !filter_match_preds(file->filter, fbuffer->entry)))
2799 return;
2800
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002801 event = &fbuffer->trace_file->event_call->event;
2802
2803 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2804 trace_seq_init(&iter->seq);
2805 iter->ent = fbuffer->entry;
2806 event_call->event.funcs->trace(iter, 0, event);
2807 trace_seq_putc(&iter->seq, 0);
2808 printk("%s", iter->seq.buffer);
2809
2810 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2811}
2812
2813int tracepoint_printk_sysctl(struct ctl_table *table, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02002814 void *buffer, size_t *lenp,
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002815 loff_t *ppos)
2816{
2817 int save_tracepoint_printk;
2818 int ret;
2819
2820 mutex_lock(&tracepoint_printk_mutex);
2821 save_tracepoint_printk = tracepoint_printk;
2822
2823 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2824
2825 /*
2826	 * This will force exiting early, as tracepoint_printk
2827	 * is always zero when tracepoint_print_iter is not allocated
2828 */
2829 if (!tracepoint_print_iter)
2830 tracepoint_printk = 0;
2831
2832 if (save_tracepoint_printk == tracepoint_printk)
2833 goto out;
2834
2835 if (tracepoint_printk)
2836 static_key_enable(&tracepoint_printk_key.key);
2837 else
2838 static_key_disable(&tracepoint_printk_key.key);
2839
2840 out:
2841 mutex_unlock(&tracepoint_printk_mutex);
2842
2843 return ret;
2844}
2845
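/*
 * This handler backs the kernel.tracepoint_printk sysctl. From user
 * space it is typically toggled with something like (illustrative):
 *
 *	echo 1 > /proc/sys/kernel/tracepoint_printk
 */
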
2846void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2847{
2848 if (static_key_false(&tracepoint_printk_key.key))
2849 output_printk(fbuffer);
2850
Tingwei Zhang8ab7a2b2020-10-05 10:13:14 +03002851 if (static_branch_unlikely(&trace_event_exports_enabled))
2852 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
Masami Hiramatsu8cfcf152020-01-11 01:05:31 +09002853 event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002854 fbuffer->event, fbuffer->entry,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002855 fbuffer->trace_ctx, fbuffer->regs);
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002856}
2857EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2858
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002859/*
2860 * Skip 3:
2861 *
2862 * trace_buffer_unlock_commit_regs()
2863 * trace_event_buffer_commit()
2864 * trace_event_raw_event_xxx()
Rohit Visavalia13cf9122018-01-29 15:11:26 +05302865 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002866# define STACK_SKIP 3
2867
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002868void trace_buffer_unlock_commit_regs(struct trace_array *tr,
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002869 struct trace_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002870 struct ring_buffer_event *event,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002871 unsigned int trace_ctx,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002872 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002873{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002874 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002875
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002876 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002877 * If regs is not set, then skip the necessary functions.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002878 * Note, we can still get here via blktrace, wakeup tracer
2879 * and mmiotrace, but that's ok if they lose a function or
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002880 * two. They are not that meaningful.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002881 */
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002882 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2883 ftrace_trace_userstack(tr, buffer, trace_ctx);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002884}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002885
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002886/*
2887 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2888 */
2889void
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002890trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002891 struct ring_buffer_event *event)
2892{
2893 __buffer_unlock_commit(buffer, event);
2894}
2895
Ingo Molnare309b412008-05-12 21:20:51 +02002896void
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002897trace_function(struct trace_array *tr, unsigned long ip,
2898	       unsigned long parent_ip, unsigned int trace_ctx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002899{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002900 struct trace_event_call *call = &event_function;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002901 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002902 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002903 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002904
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002905 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002906 trace_ctx);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002907 if (!event)
2908 return;
2909 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002910 entry->ip = ip;
2911 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002912
Chunyan Zhang478409d2016-11-21 15:57:18 +08002913 if (!call_filter_check_discard(call, entry, buffer, event)) {
Tingwei Zhang8438f522020-10-05 10:13:13 +03002914 if (static_branch_unlikely(&trace_function_exports_enabled))
2915 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002916 __buffer_unlock_commit(buffer, event);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002917 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002918}
2919
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002920#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002921
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002922/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2923#define FTRACE_KSTACK_NESTING 4
2924
2925#define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2926
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002927struct ftrace_stack {
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002928 unsigned long calls[FTRACE_KSTACK_ENTRIES];
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002929};
2930
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002931
2932struct ftrace_stacks {
2933 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2934};
2935
2936static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002937static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2938
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002939static void __ftrace_trace_stack(struct trace_buffer *buffer,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002940 unsigned int trace_ctx,
2941 int skip, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02002942{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002943 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002944 struct ring_buffer_event *event;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002945 unsigned int size, nr_entries;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002946 struct ftrace_stack *fstack;
Steven Rostedt777e2082008-09-29 23:02:42 -04002947 struct stack_entry *entry;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002948 int stackidx;
Ingo Molnar86387f72008-05-12 21:20:51 +02002949
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002950 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002951	 * Add one, for this function and the call to stack_trace_save().
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002952	 * If regs is set, then these functions will not be in the way.
2953 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002954#ifndef CONFIG_UNWINDER_ORC
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002955 if (!regs)
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002956 skip++;
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002957#endif
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002958
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002959 preempt_disable_notrace();
2960
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002961 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2962
2963 /* This should never happen. If it does, yell once and skip */
Qiujun Huang906695e2020-10-31 16:57:14 +08002964 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002965 goto out;
2966
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002967 /*
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002968 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2969 * interrupt will either see the value pre increment or post
2970 * increment. If the interrupt happens pre increment it will have
2971 * restored the counter when it returns. We just need a barrier to
2972 * keep gcc from moving things around.
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002973 */
2974 barrier();
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002975
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002976 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002977 size = ARRAY_SIZE(fstack->calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002978
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002979 if (regs) {
2980 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2981 size, skip);
2982 } else {
2983 nr_entries = stack_trace_save(fstack->calls, size, skip);
2984 }
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002985
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002986 size = nr_entries * sizeof(unsigned long);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002987 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
Steven Rostedt (VMware)9deb1932021-04-01 13:54:40 -04002988 (sizeof(*entry) - sizeof(entry->caller)) + size,
2989 trace_ctx);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002990 if (!event)
2991 goto out;
2992 entry = ring_buffer_event_data(event);
2993
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02002994 memcpy(&entry->caller, fstack->calls, size);
2995 entry->size = nr_entries;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002996
Tom Zanussif306cc82013-10-24 08:34:17 -05002997 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002998 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002999
3000 out:
3001 /* Again, don't let gcc optimize things here */
3002 barrier();
Shan Wei82146522012-11-19 13:21:01 +08003003 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003004 preempt_enable_notrace();
3005
Ingo Molnarf0a920d2008-05-12 21:20:47 +02003006}
3007
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003008static inline void ftrace_trace_stack(struct trace_array *tr,
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003009 struct trace_buffer *buffer,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003010 unsigned int trace_ctx,
3011 int skip, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05003012{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003013 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05003014 return;
3015
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003016 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05003017}
3018
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003019void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3020 int skip)
Steven Rostedt38697052008-10-01 13:14:09 -04003021{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003022 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04003023
3024 if (rcu_is_watching()) {
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003025 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04003026 return;
3027 }
3028
3029 /*
3030 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3031 * but if the above rcu_is_watching() failed, then the NMI
3032 * triggered someplace critical, and rcu_irq_enter() should
3033 * not be called from NMI.
3034 */
3035 if (unlikely(in_nmi()))
3036 return;
3037
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04003038 rcu_irq_enter_irqson();
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003039 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04003040 rcu_irq_exit_irqson();
Steven Rostedt38697052008-10-01 13:14:09 -04003041}
3042
Steven Rostedt03889382009-12-11 09:48:22 -05003043/**
3044 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003045 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05003046 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003047void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05003048{
Steven Rostedt03889382009-12-11 09:48:22 -05003049 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05003050 return;
Steven Rostedt03889382009-12-11 09:48:22 -05003051
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05003052#ifndef CONFIG_UNWINDER_ORC
3053 /* Skip 1 to skip this function. */
3054 skip++;
3055#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003056 __ftrace_trace_stack(global_trace.array_buffer.buffer,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003057 tracing_gen_ctx(), skip, NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05003058}
Nikolay Borisovda387e52018-10-17 09:51:43 +03003059EXPORT_SYMBOL_GPL(trace_dump_stack);
Steven Rostedt03889382009-12-11 09:48:22 -05003060
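/*
 * Illustrative use (a sketch): drop this into a suspect code path to
 * record the current kernel stack into the trace buffer, skipping no
 * extra helper frames:
 *
 *	trace_dump_stack(0);
 */
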
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003061#ifdef CONFIG_USER_STACKTRACE_SUPPORT
Steven Rostedt91e86e52010-11-10 12:56:12 +01003062static DEFINE_PER_CPU(int, user_stack_count);
3063
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003064static void
Steven Rostedt (VMware)bcee5272020-12-04 16:36:16 -05003065ftrace_trace_userstack(struct trace_array *tr,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003066 struct trace_buffer *buffer, unsigned int trace_ctx)
Török Edwin02b67512008-11-22 13:28:47 +02003067{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003068 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02003069 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02003070 struct userstack_entry *entry;
Török Edwin02b67512008-11-22 13:28:47 +02003071
Steven Rostedt (VMware)bcee5272020-12-04 16:36:16 -05003072 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02003073 return;
3074
Steven Rostedtb6345872010-03-12 20:03:30 -05003075 /*
3076	 * NMIs can not handle page faults, even with fixups.
3077	 * Saving the user stack can (and often does) fault.
3078 */
3079 if (unlikely(in_nmi()))
3080 return;
3081
Steven Rostedt91e86e52010-11-10 12:56:12 +01003082 /*
3083 * prevent recursion, since the user stack tracing may
3084 * trigger other kernel events.
3085 */
3086 preempt_disable();
3087 if (__this_cpu_read(user_stack_count))
3088 goto out;
3089
3090 __this_cpu_inc(user_stack_count);
3091
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003092 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003093 sizeof(*entry), trace_ctx);
Török Edwin02b67512008-11-22 13:28:47 +02003094 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08003095 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02003096 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02003097
Steven Rostedt48659d32009-09-11 11:36:23 -04003098 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02003099 memset(&entry->caller, 0, sizeof(entry->caller));
3100
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02003101 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
Tom Zanussif306cc82013-10-24 08:34:17 -05003102 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003103 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01003104
Li Zefan1dbd1952010-12-09 15:47:56 +08003105 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01003106 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01003107 out:
3108 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02003109}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003110#else /* CONFIG_USER_STACKTRACE_SUPPORT */
Steven Rostedt (VMware)bcee5272020-12-04 16:36:16 -05003111static void ftrace_trace_userstack(struct trace_array *tr,
3112 struct trace_buffer *buffer,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003113 unsigned int trace_ctx)
Török Edwin02b67512008-11-22 13:28:47 +02003114{
Török Edwin02b67512008-11-22 13:28:47 +02003115}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003116#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
Török Edwin02b67512008-11-22 13:28:47 +02003117
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02003118#endif /* CONFIG_STACKTRACE */
3119
Steven Rostedt07d777f2011-09-22 14:01:55 -04003120/* created for use with alloc_percpu */
3121struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003122 int nesting;
3123 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04003124};
3125
3126static struct trace_buffer_struct *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003127
3128/*
Qiujun Huang2b5894c2020-10-29 23:05:54 +08003129 * This allows for lockless recording. If we're nested too deeply, then
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003130 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04003131 */
3132static char *get_trace_buf(void)
3133{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003134 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04003135
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003136 if (!buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003137 return NULL;
3138
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04003139 buffer->nesting++;
3140
3141 /* Interrupts must see nesting incremented before we use the buffer */
3142 barrier();
Qiujun Huangc1acb4a2020-10-30 00:19:05 +08003143 return &buffer->buffer[buffer->nesting - 1][0];
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003144}
3145
3146static void put_trace_buf(void)
3147{
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04003148 /* Don't let the decrement of nesting leak before this */
3149 barrier();
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003150 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04003151}
3152
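/*
 * The pair above is used in the printk paths below, roughly like this
 * (sketch): preemption must stay disabled while the buffer is held.
 *
 *	preempt_disable_notrace();
 *	tbuffer = get_trace_buf();
 *	if (tbuffer) {
 *		...format the message into tbuffer...
 *		put_trace_buf();
 *	}
 *	preempt_enable_notrace();
 */
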
3153static int alloc_percpu_trace_buffer(void)
3154{
3155 struct trace_buffer_struct *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003156
Steven Rostedt (VMware)38ce2a92020-08-06 12:46:49 -04003157 if (trace_percpu_buffer)
3158 return 0;
3159
Steven Rostedt07d777f2011-09-22 14:01:55 -04003160 buffers = alloc_percpu(struct trace_buffer_struct);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05003161 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003162 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003163
3164 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003165 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003166}
3167
Steven Rostedt81698832012-10-11 10:15:05 -04003168static int buffers_allocated;
3169
Steven Rostedt07d777f2011-09-22 14:01:55 -04003170void trace_printk_init_buffers(void)
3171{
Steven Rostedt07d777f2011-09-22 14:01:55 -04003172 if (buffers_allocated)
3173 return;
3174
3175 if (alloc_percpu_trace_buffer())
3176 return;
3177
Steven Rostedt2184db42014-05-28 13:14:40 -04003178 /* trace_printk() is for debug use only. Don't use it in production. */
3179
Joe Perchesa395d6a2016-03-22 14:28:09 -07003180 pr_warn("\n");
3181 pr_warn("**********************************************************\n");
3182 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3183 pr_warn("** **\n");
3184 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3185 pr_warn("** **\n");
3186 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3187 pr_warn("** unsafe for production use. **\n");
3188 pr_warn("** **\n");
3189 pr_warn("** If you see this message and you are not debugging **\n");
3190 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3191 pr_warn("** **\n");
3192 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3193 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04003194
Steven Rostedtb382ede62012-10-10 21:44:34 -04003195 /* Expand the buffers to set size */
3196 tracing_update_buffers();
3197
Steven Rostedt07d777f2011-09-22 14:01:55 -04003198 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04003199
3200 /*
3201 * trace_printk_init_buffers() can be called by modules.
3202 * If that happens, then we need to start cmdline recording
3203 * directly here. If the global_trace.buffer is already
3204 * allocated here, then this was called by module code.
3205 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003206 if (global_trace.array_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04003207 tracing_start_cmdline_record();
3208}
Divya Indif45d1222019-03-20 11:28:51 -07003209EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
Steven Rostedt81698832012-10-11 10:15:05 -04003210
3211void trace_printk_start_comm(void)
3212{
3213 /* Start tracing comms if trace printk is set */
3214 if (!buffers_allocated)
3215 return;
3216 tracing_start_cmdline_record();
3217}
3218
3219static void trace_printk_start_stop_comm(int enabled)
3220{
3221 if (!buffers_allocated)
3222 return;
3223
3224 if (enabled)
3225 tracing_start_cmdline_record();
3226 else
3227 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003228}
3229
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003230/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003231 * trace_vbprintk - write binary msg to tracing buffer
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07003232 * @ip: The address of the caller
3233 * @fmt: The string format to write to the buffer
3234 * @args: Arguments for @fmt
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003235 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04003236int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003237{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003238 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003239 struct ring_buffer_event *event;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003240 struct trace_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003241 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003242 struct bprint_entry *entry;
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003243 unsigned int trace_ctx;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003244 char *tbuffer;
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003245 int len = 0, size;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003246
3247 if (unlikely(tracing_selftest_running || tracing_disabled))
3248 return 0;
3249
3250 /* Don't pollute graph traces with trace_vprintk internals */
3251 pause_graph_tracing();
3252
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003253 trace_ctx = tracing_gen_ctx();
Steven Rostedt5168ae52010-06-03 09:36:50 -04003254 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003255
Steven Rostedt07d777f2011-09-22 14:01:55 -04003256 tbuffer = get_trace_buf();
3257 if (!tbuffer) {
3258 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003259 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003260 }
3261
3262 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3263
3264 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Steven Rostedt (VMware)34423f22020-01-22 06:44:50 -05003265 goto out_put;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003266
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003267 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003268 buffer = tr->array_buffer.buffer;
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003269 ring_buffer_nest_start(buffer);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003270 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003271 trace_ctx);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003272 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003273 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003274 entry = ring_buffer_event_data(event);
3275 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003276 entry->fmt = fmt;
3277
Steven Rostedt07d777f2011-09-22 14:01:55 -04003278 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05003279 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003280 __buffer_unlock_commit(buffer, event);
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003281 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003282 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003283
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003284out:
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003285 ring_buffer_nest_end(buffer);
Steven Rostedt (VMware)34423f22020-01-22 06:44:50 -05003286out_put:
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003287 put_trace_buf();
3288
3289out_nobuffer:
Steven Rostedt5168ae52010-06-03 09:36:50 -04003290 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003291 unpause_graph_tracing();
3292
3293 return len;
3294}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003295EXPORT_SYMBOL_GPL(trace_vbprintk);
3296
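/*
 * Rough sketch of a varargs front end (illustrative; "my_bprintk" is a
 * made-up name, the real entry points live in trace_printk.c):
 *
 *	int my_bprintk(unsigned long ip, const char *fmt, ...)
 *	{
 *		va_list ap;
 *		int ret;
 *
 *		va_start(ap, fmt);
 *		ret = trace_vbprintk(ip, fmt, ap);
 *		va_end(ap);
 *		return ret;
 *	}
 */
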
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003297__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003298static int
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003299__trace_array_vprintk(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003300 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003301{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003302 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003303 struct ring_buffer_event *event;
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003304 int len = 0, size;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003305 struct print_entry *entry;
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003306 unsigned int trace_ctx;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003307 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003308
3309 if (tracing_disabled || tracing_selftest_running)
3310 return 0;
3311
Steven Rostedt07d777f2011-09-22 14:01:55 -04003312 /* Don't pollute graph traces with trace_vprintk internals */
3313 pause_graph_tracing();
3314
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003315 trace_ctx = tracing_gen_ctx();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003316 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003317
Steven Rostedt07d777f2011-09-22 14:01:55 -04003318
3319 tbuffer = get_trace_buf();
3320 if (!tbuffer) {
3321 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003322 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003323 }
3324
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003325 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003326
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003327 size = sizeof(*entry) + len + 1;
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003328 ring_buffer_nest_start(buffer);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003329 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003330 trace_ctx);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003331 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003332 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003333 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01003334 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003335
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003336 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05003337 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003338 __buffer_unlock_commit(buffer, event);
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003339 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003340 }
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003341
3342out:
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003343 ring_buffer_nest_end(buffer);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003344 put_trace_buf();
3345
3346out_nobuffer:
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003347 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003348 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003349
3350 return len;
3351}
Steven Rostedt659372d2009-09-03 19:11:07 -04003352
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003353__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003354int trace_array_vprintk(struct trace_array *tr,
3355 unsigned long ip, const char *fmt, va_list args)
3356{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003357 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003358}
3359
Steven Rostedt (VMware)38ce2a92020-08-06 12:46:49 -04003360/**
3361 * trace_array_printk - Print a message to a specific instance
3362 * @tr: The instance trace_array descriptor
3363 * @ip: The instruction pointer that this is called from.
3364 * @fmt: The format to print (printf format)
3365 *
3366	 * If a subsystem sets up its own instance, it has the right to
3367	 * printk strings into its tracing instance buffer using this
3368	 * function. Note, this function will not write into the top level
3369	 * buffer (use trace_printk() for that), as the top level buffer
3370	 * should only contain events that can be individually disabled.
3371	 * trace_printk() is only for debugging a kernel, and should
Ingo Molnarf2cc0202021-03-23 18:49:35 +01003372	 * never be incorporated into normal use.
Steven Rostedt (VMware)38ce2a92020-08-06 12:46:49 -04003373 *
3374 * trace_array_printk() can be used, as it will not add noise to the
3375 * top level tracing buffer.
3376 *
3377 * Note, trace_array_init_printk() must be called on @tr before this
3378 * can be used.
3379 */
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003380__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003381int trace_array_printk(struct trace_array *tr,
3382 unsigned long ip, const char *fmt, ...)
3383{
3384 int ret;
3385 va_list ap;
3386
Divya Indi953ae452019-08-14 10:55:25 -07003387 if (!tr)
3388 return -ENOENT;
3389
Steven Rostedt (VMware)c791cc42020-06-16 14:53:55 -04003390 /* This is only allowed for created instances */
3391 if (tr == &global_trace)
3392 return 0;
3393
3394 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3395 return 0;
3396
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003397 va_start(ap, fmt);
3398 ret = trace_array_vprintk(tr, ip, fmt, ap);
3399 va_end(ap);
3400 return ret;
3401}
Divya Indif45d1222019-03-20 11:28:51 -07003402EXPORT_SYMBOL_GPL(trace_array_printk);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003403
Steven Rostedt (VMware)38ce2a92020-08-06 12:46:49 -04003404/**
3405 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3406 * @tr: The trace array to initialize the buffers for
3407 *
3408 * As trace_array_printk() only writes into instances, they are OK to
3409 * have in the kernel (unlike trace_printk()). This needs to be called
3410 * before trace_array_printk() can be used on a trace_array.
3411 */
3412int trace_array_init_printk(struct trace_array *tr)
3413{
3414 if (!tr)
3415 return -ENOENT;
3416
3417 /* This is only allowed for created instances */
3418 if (tr == &global_trace)
3419 return -EINVAL;
3420
3421 return alloc_percpu_trace_buffer();
3422}
3423EXPORT_SYMBOL_GPL(trace_array_init_printk);
3424
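/*
 * Putting the two together (hedged sketch; the instance name "myinst"
 * is made up):
 *
 *	struct trace_array *tr = trace_array_get_by_name("myinst");
 *
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "hello %d\n", 42);
 */
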
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003425__printf(3, 4)
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003426int trace_array_printk_buf(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003427 unsigned long ip, const char *fmt, ...)
3428{
3429 int ret;
3430 va_list ap;
3431
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003432 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003433 return 0;
3434
3435 va_start(ap, fmt);
3436 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3437 va_end(ap);
3438 return ret;
3439}
3440
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003441__printf(2, 0)
Steven Rostedt659372d2009-09-03 19:11:07 -04003442int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3443{
Steven Rostedta813a152009-10-09 01:41:35 -04003444 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04003445}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003446EXPORT_SYMBOL_GPL(trace_vprintk);
3447
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003448static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04003449{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003450 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3451
Steven Rostedt5a90f572008-09-03 17:42:51 -04003452 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003453 if (buf_iter)
Steven Rostedt (VMware)bc1a72a2020-03-17 17:32:25 -04003454 ring_buffer_iter_advance(buf_iter);
Steven Rostedt5a90f572008-09-03 17:42:51 -04003455}
3456
Ingo Molnare309b412008-05-12 21:20:51 +02003457static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003458peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3459 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003460{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003461 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003462 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003463
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04003464 if (buf_iter) {
Steven Rostedtd7690412008-10-01 00:29:53 -04003465 event = ring_buffer_iter_peek(buf_iter, ts);
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04003466 if (lost_events)
3467 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3468 (unsigned long)-1 : 0;
3469 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003470 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003471 lost_events);
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04003472 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003473
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003474 if (event) {
3475 iter->ent_size = ring_buffer_event_length(event);
3476 return ring_buffer_event_data(event);
3477 }
3478 iter->ent_size = 0;
3479 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003480}
Steven Rostedtd7690412008-10-01 00:29:53 -04003481
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003482static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003483__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3484 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003485{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003486 struct trace_buffer *buffer = iter->array_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003487 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08003488 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003489 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003490 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003491 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003492 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003493 int cpu;
3494
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003495 /*
3496	 * If we are in a per_cpu trace file, don't bother iterating over
3497	 * all CPUs; just peek at that one directly.
3498 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003499 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003500 if (ring_buffer_empty_cpu(buffer, cpu_file))
3501 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003502 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003503 if (ent_cpu)
3504 *ent_cpu = cpu_file;
3505
3506 return ent;
3507 }
3508
Steven Rostedtab464282008-05-12 21:21:00 +02003509 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003510
3511 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003512 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003513
Steven Rostedtbc21b472010-03-31 19:49:26 -04003514 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003515
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02003516 /*
3517 * Pick the entry with the smallest timestamp:
3518 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003519 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003520 next = ent;
3521 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003522 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003523 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003524 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003525 }
3526 }
3527
Steven Rostedt12b5da32012-03-27 10:43:28 -04003528 iter->ent_size = next_size;
3529
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003530 if (ent_cpu)
3531 *ent_cpu = next_cpu;
3532
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003533 if (ent_ts)
3534 *ent_ts = next_ts;
3535
Steven Rostedtbc21b472010-03-31 19:49:26 -04003536 if (missing_events)
3537 *missing_events = next_lost;
3538
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003539 return next;
3540}
3541
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09003542#define STATIC_FMT_BUF_SIZE 128
3543static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3544
3545static char *trace_iter_expand_format(struct trace_iterator *iter)
3546{
3547 char *tmp;
3548
3549 if (iter->fmt == static_fmt_buf)
3550 return NULL;
3551
3552 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3553 GFP_KERNEL);
3554 if (tmp) {
3555 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3556 iter->fmt = tmp;
3557 }
3558
3559 return tmp;
3560}
3561
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05003562/* Returns true if the string is safe to dereference from an event */
3563static bool trace_safe_str(struct trace_iterator *iter, const char *str)
3564{
3565 unsigned long addr = (unsigned long)str;
3566 struct trace_event *trace_event;
3567 struct trace_event_call *event;
3568
3569 /* OK if part of the event data */
3570 if ((addr >= (unsigned long)iter->ent) &&
3571 (addr < (unsigned long)iter->ent + iter->ent_size))
3572 return true;
3573
3574 /* OK if part of the temp seq buffer */
3575 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3576 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3577 return true;
3578
3579 /* Core rodata can not be freed */
3580 if (is_kernel_rodata(addr))
3581 return true;
3582
3583 if (trace_is_tracepoint_string(str))
3584 return true;
3585
3586 /*
3587 * Now this could be a module event, referencing core module
3588 * data, which is OK.
3589 */
3590 if (!iter->ent)
3591 return false;
3592
3593 trace_event = ftrace_find_event(iter->ent->type);
3594 if (!trace_event)
3595 return false;
3596
3597 event = container_of(trace_event, struct trace_event_call, event);
3598 if (!event->mod)
3599 return false;
3600
3601 /* Would rather have rodata, but this will suffice */
3602 if (within_module_core(addr, event->mod))
3603 return true;
3604
3605 return false;
3606}
3607
3608static const char *show_buffer(struct trace_seq *s)
3609{
3610 struct seq_buf *seq = &s->seq;
3611
3612 seq_buf_terminate(seq);
3613
3614 return seq->buffer;
3615}
3616
3617static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3618
3619static int test_can_verify_check(const char *fmt, ...)
3620{
3621 char buf[16];
3622 va_list ap;
3623 int ret;
3624
3625 /*
3626	 * The verifier depends on vsnprintf() modifying the va_list passed
3627	 * to it, which is the case when it is passed by reference. Some
3628	 * architectures (like x86_32) pass it by value, which means that
3629	 * vsnprintf() does not modify the caller's va_list, and the verifier
3630	 * would then need to understand all the values that vsnprintf()
3631	 * can consume. If the va_list is passed by value, the verifier
3632	 * is disabled.
3633 */
3634 va_start(ap, fmt);
3635 vsnprintf(buf, 16, "%d", ap);
3636 ret = va_arg(ap, int);
3637 va_end(ap);
3638
3639 return ret;
3640}
3641
3642static void test_can_verify(void)
3643{
3644 if (!test_can_verify_check("%d %d", 0, 1)) {
3645 pr_info("trace event string verifier disabled\n");
3646 static_branch_inc(&trace_no_verify);
3647 }
3648}
3649
3650/**
3651 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3652 * @iter: The iterator that holds the seq buffer and the event being printed
3653 * @fmt: The format used to print the event
3654 * @ap: The va_list holding the data to print from @fmt.
3655 *
3656 * This writes the data into the @iter->seq buffer using the data from
3657 * @fmt and @ap. If the format has a %s, then the source of the string
3658 * is examined to make sure it is safe to print, otherwise it will
3659	 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string
3660 * pointer.
3661 */
3662void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3663 va_list ap)
3664{
3665 const char *p = fmt;
3666 const char *str;
3667 int i, j;
3668
3669 if (WARN_ON_ONCE(!fmt))
3670 return;
3671
3672 if (static_branch_unlikely(&trace_no_verify))
3673 goto print;
3674
3675 /* Don't bother checking when doing a ftrace_dump() */
3676 if (iter->fmt == static_fmt_buf)
3677 goto print;
3678
3679 while (*p) {
3680 j = 0;
3681
3682 /* We only care about %s and variants */
3683 for (i = 0; p[i]; i++) {
3684 if (i + 1 >= iter->fmt_size) {
3685 /*
3686 * If we can't expand the copy buffer,
3687 * just print it.
3688 */
3689 if (!trace_iter_expand_format(iter))
3690 goto print;
3691 }
3692
3693 if (p[i] == '\\' && p[i+1]) {
3694 i++;
3695 continue;
3696 }
3697 if (p[i] == '%') {
3698 /* Need to test cases like %08.*s */
3699 for (j = 1; p[i+j]; j++) {
3700 if (isdigit(p[i+j]) ||
3701 p[i+j] == '*' ||
3702 p[i+j] == '.')
3703 continue;
3704 break;
3705 }
3706 if (p[i+j] == 's')
3707 break;
3708 }
3709 j = 0;
3710 }
3711 /* If no %s found then just print normally */
3712 if (!p[i])
3713 break;
3714
3715 /* Copy up to the %s, and print that */
3716 strncpy(iter->fmt, p, i);
3717 iter->fmt[i] = '\0';
3718 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3719
3720 /* The ap now points to the string data of the %s */
3721 str = va_arg(ap, const char *);
3722
3723 /*
3724 * If you hit this warning, it is likely that the
3725 * trace event in question used %s on a string that
3726 * was saved at the time of the event, but may not be
3727 * around when the trace is read. Use __string(),
3728 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3729 * instead. See samples/trace_events/trace-events-sample.h
3730 * for reference.
3731 */
3732 if (WARN_ONCE(!trace_safe_str(iter, str),
3733 "fmt: '%s' current_buffer: '%s'",
3734 fmt, show_buffer(&iter->seq))) {
3735 int ret;
3736
3737 /* Try to safely read the string */
3738 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3739 iter->fmt_size);
3740 if (ret < 0)
3741 trace_seq_printf(&iter->seq, "(0x%px)", str);
3742 else
3743 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3744 str, iter->fmt);
3745 str = "[UNSAFE-MEMORY]";
3746 strcpy(iter->fmt, "%s");
3747 } else {
3748 strncpy(iter->fmt, p + i, j + 1);
3749 iter->fmt[j+1] = '\0';
3750 }
3751 trace_seq_printf(&iter->seq, iter->fmt, str);
3752
3753 p += i + j + 1;
3754 }
3755 print:
3756 if (*p)
3757 trace_seq_vprintf(&iter->seq, p, ap);
3758}
3759
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09003760const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3761{
3762 const char *p, *new_fmt;
3763 char *q;
3764
3765 if (WARN_ON_ONCE(!fmt))
3766 return fmt;
3767
Masami Hiramatsua345a672020-10-15 23:55:25 +09003768 if (iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3769 return fmt;
3770
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09003771 p = fmt;
3772 new_fmt = q = iter->fmt;
3773 while (*p) {
3774 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3775 if (!trace_iter_expand_format(iter))
3776 return fmt;
3777
3778 q += iter->fmt - new_fmt;
3779 new_fmt = iter->fmt;
3780 }
3781
3782 *q++ = *p++;
3783
3784 /* Replace %p with %px */
3785 if (p[-1] == '%') {
3786 if (p[0] == '%') {
3787 *q++ = *p++;
3788 } else if (p[0] == 'p' && !isalnum(p[1])) {
3789 *q++ = *p++;
3790 *q++ = 'x';
3791 }
3792 }
3793 }
3794 *q = '\0';
3795
3796 return new_fmt;
3797}
3798
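/*
 * Example of the rewrite above (illustrative): with the hash-ptr trace
 * option cleared, a format such as
 *
 *	"addr=%p cookie=%pK done=%%"
 *
 * comes back as
 *
 *	"addr=%px cookie=%pK done=%%"
 *
 * Bare %p becomes %px, while %% and %p extensions like %pK are left
 * untouched.
 */
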
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003799#define STATIC_TEMP_BUF_SIZE 128
Minchan Kim8fa655a2020-11-25 14:56:54 -08003800static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003801
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003802/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003803struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3804 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003805{
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003806 /* __find_next_entry will reset ent_size */
3807 int ent_size = iter->ent_size;
3808 struct trace_entry *entry;
3809
3810 /*
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003811 * If called from ftrace_dump(), then the iter->temp buffer
3812 * will be the static_temp_buf and not created from kmalloc.
3813	 * If the entry size is greater than the buffer, we cannot
3814	 * save it. Just return NULL in that case. This is only
3815	 * used to add markers when two consecutive events' time
3816	 * stamps have a large delta. See trace_print_lat_context().
3817 */
3818 if (iter->temp == static_temp_buf &&
3819 STATIC_TEMP_BUF_SIZE < ent_size)
3820 return NULL;
3821
3822 /*
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003823	 * __find_next_entry() may call peek_next_entry(), which may
3824	 * call ring_buffer_peek(), which may make the contents of
3825	 * iter->ent undefined. Need to copy iter->ent now.
3826 */
3827 if (iter->ent && iter->ent != iter->temp) {
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003828 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3829 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
Steven Rostedt (VMware)851e6f62020-09-29 12:27:23 -04003830 void *temp;
3831 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3832 if (!temp)
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003833 return NULL;
Steven Rostedt (VMware)851e6f62020-09-29 12:27:23 -04003834 kfree(iter->temp);
3835 iter->temp = temp;
3836 iter->temp_size = iter->ent_size;
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003837 }
3838 memcpy(iter->temp, iter->ent, iter->ent_size);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003839 iter->ent = iter->temp;
3840 }
3841 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3842 /* Put back the original ent_size */
3843 iter->ent_size = ent_size;
3844
3845 return entry;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003846}
Ingo Molnar8c523a92008-05-12 21:20:46 +02003847
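/*
 * The temp-buffer handling above follows a grow-only scratch-buffer
 * pattern: allocate the replacement before freeing the old buffer, so
 * an allocation failure leaves the previous contents usable. A small
 * userspace model of that pattern (illustrative sketch; the kernel code
 * additionally refuses to grow the static ftrace_dump() buffer):
 */
#include <stdlib.h>

struct scratch {
	void	*buf;
	size_t	size;
};

/* Ensure @s can hold @need bytes; 0 on success, -1 keeps the old buffer. */
static int scratch_reserve(struct scratch *s, size_t need)
{
	void *tmp;

	if (s->buf && s->size >= need)
		return 0;
	tmp = malloc(need);		/* allocate first ... */
	if (!tmp)
		return -1;		/* ... so failure keeps the old data */
	free(s->buf);
	s->buf = tmp;
	s->size = need;
	return 0;
}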
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003848/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05003849void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003850{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003851 iter->ent = __find_next_entry(iter, &iter->cpu,
3852 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003853
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003854 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003855 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003856
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003857 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003858}
3859
Ingo Molnare309b412008-05-12 21:20:51 +02003860static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003861{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003862 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003863 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003864}
3865
Ingo Molnare309b412008-05-12 21:20:51 +02003866static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003867{
3868 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003869 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003870 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003871
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003872 WARN_ON_ONCE(iter->leftover);
3873
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003874 (*pos)++;
3875
3876 /* can't go backwards */
3877 if (iter->idx > i)
3878 return NULL;
3879
3880 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05003881 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003882 else
3883 ent = iter;
3884
3885 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05003886 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003887
3888 iter->pos = *pos;
3889
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003890 return ent;
3891}
3892
Jason Wessel955b61e2010-08-05 09:22:23 -05003893void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003894{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003895 struct ring_buffer_iter *buf_iter;
3896 unsigned long entries = 0;
3897 u64 ts;
3898
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003899 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003900
Steven Rostedt6d158a82012-06-27 20:46:14 -04003901 buf_iter = trace_buffer_iter(iter, cpu);
3902 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003903 return;
3904
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003905 ring_buffer_iter_reset(buf_iter);
3906
3907 /*
3908	 * With the max latency tracers, a reset may never have taken
3909	 * place on a cpu. This is evident from the timestamp being
3910	 * before the start of the buffer.
3911 */
YangHui69243722020-06-16 11:36:46 +08003912 while (ring_buffer_iter_peek(buf_iter, &ts)) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003913 if (ts >= iter->array_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003914 break;
3915 entries++;
Steven Rostedt (VMware)bc1a72a2020-03-17 17:32:25 -04003916 ring_buffer_iter_advance(buf_iter);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003917 }
3918
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003919 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003920}
3921
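/*
 * What the loop above computes, restated as a userspace model
 * (illustrative sketch): entries stamped before @time_start predate the
 * last buffer reset; they are stepped over, but their count is kept so
 * the accounting in get_total_entries_cpu() below can subtract them.
 */
typedef unsigned long long u64;

static unsigned long count_skipped(const u64 *ts, unsigned long n,
				   u64 time_start)
{
	unsigned long skipped = 0;

	while (skipped < n && ts[skipped] < time_start)
		skipped++;
	return skipped;
}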
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003922/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003923 * The current tracer is copied to avoid global locking
3924 * all around.
3925 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003926static void *s_start(struct seq_file *m, loff_t *pos)
3927{
3928 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003929 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003930 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003931 void *p = NULL;
3932 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003933 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003934
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09003935 /*
3936	 * Copy the tracer to avoid using a global lock all around.
3937	 * iter->trace is a copy of current_trace; the name pointer can
3938	 * be compared instead of calling strcmp(), as iter->trace->name
3939	 * will point to the same string as current_trace->name.
3940 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003941 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003942 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3943 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003944 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003945
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003946#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003947 if (iter->snapshot && iter->trace->use_max_tr)
3948 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003949#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003950
3951 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003952 atomic_inc(&trace_record_taskinfo_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003953
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003954 if (*pos != iter->pos) {
3955 iter->ent = NULL;
3956 iter->cpu = 0;
3957 iter->idx = -1;
3958
Steven Rostedtae3b5092013-01-23 15:22:59 -05003959 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003960 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003961 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003962 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003963 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003964
Lai Jiangshanac91d852010-03-02 17:54:50 +08003965 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003966 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3967 ;
3968
3969 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003970 /*
3971 * If we overflowed the seq_file before, then we want
3972 * to just reuse the trace_seq buffer again.
3973 */
3974 if (iter->leftover)
3975 p = iter;
3976 else {
3977 l = *pos - 1;
3978 p = s_next(m, p, &l);
3979 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003980 }
3981
Lai Jiangshan4f535962009-05-18 19:35:34 +08003982 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003983 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003984 return p;
3985}
3986
3987static void s_stop(struct seq_file *m, void *p)
3988{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003989 struct trace_iterator *iter = m->private;
3990
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003991#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003992 if (iter->snapshot && iter->trace->use_max_tr)
3993 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003994#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003995
3996 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07003997 atomic_dec(&trace_record_taskinfo_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003998
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08003999 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004000 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004001}
4002
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004003static void
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004004get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004005 unsigned long *entries, int cpu)
4006{
4007 unsigned long count;
4008
4009 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4010 /*
4011	 * If this buffer has skipped entries, then it still holds
4012	 * every entry for the trace; we only need to ignore the
4013	 * ones stamped before the buffer's start time.
4014 */
4015 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4016 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4017 /* total is the same as the entries */
4018 *total = count;
4019 } else
4020 *total = count +
4021 ring_buffer_overrun_cpu(buf->buffer, cpu);
4022 *entries = count;
4023}
4024
4025static void
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004026get_total_entries(struct array_buffer *buf,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004027 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004028{
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004029 unsigned long t, e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004030 int cpu;
4031
4032 *total = 0;
4033 *entries = 0;
4034
4035 for_each_tracing_cpu(cpu) {
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004036 get_total_entries_cpu(buf, &t, &e, cpu);
4037 *total += t;
4038 *entries += e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004039 }
4040}
4041
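/*
 * The per-CPU accounting feeding the sums above, restated as a
 * userspace model (illustrative sketch): "entries" is what is still in
 * the buffer (minus pre-reset leftovers), while "total" also counts
 * events lost to ring-buffer overruns -- unless the CPU had skipped
 * entries, in which case any overruns predate the reset as well.
 */
struct cpu_counts {
	unsigned long in_buffer;	/* cf. ring_buffer_entries_cpu() */
	unsigned long overrun;		/* cf. ring_buffer_overrun_cpu() */
	unsigned long skipped;		/* entries before time_start */
};

static void account_cpu(const struct cpu_counts *c,
			unsigned long *total, unsigned long *entries)
{
	unsigned long count = c->in_buffer;

	if (c->skipped) {
		count -= c->skipped;
		*total = count;		/* overruns predate the reset too */
	} else {
		*total = count + c->overrun;
	}
	*entries = count;
}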
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004042unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4043{
4044 unsigned long total, entries;
4045
4046 if (!tr)
4047 tr = &global_trace;
4048
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004049 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004050
4051 return entries;
4052}
4053
4054unsigned long trace_total_entries(struct trace_array *tr)
4055{
4056 unsigned long total, entries;
4057
4058 if (!tr)
4059 tr = &global_trace;
4060
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004061 get_total_entries(&tr->array_buffer, &total, &entries);
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004062
4063 return entries;
4064}
4065
Ingo Molnare309b412008-05-12 21:20:51 +02004066static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004067{
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02004068 seq_puts(m, "# _------=> CPU# \n"
4069 "# / _-----=> irqs-off \n"
4070 "# | / _----=> need-resched \n"
4071 "# || / _---=> hardirq/softirq \n"
4072 "# ||| / _--=> preempt-depth \n"
4073 "# |||| / delay \n"
4074 "# cmd pid ||||| time | caller \n"
4075 "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004076}
4077
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004078static void print_event_info(struct array_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004079{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004080 unsigned long total;
4081 unsigned long entries;
4082
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004083 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004084 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4085 entries, total, num_online_cpus());
4086 seq_puts(m, "#\n");
4087}
4088
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004089static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
Joel Fernandes441dae82017-06-25 22:38:43 -07004090 unsigned int flags)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004091{
Joel Fernandes441dae82017-06-25 22:38:43 -07004092 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4093
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004094 print_event_info(buf, m);
Joel Fernandes441dae82017-06-25 22:38:43 -07004095
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02004096 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4097 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004098}
4099
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004100static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
Joel Fernandes441dae82017-06-25 22:38:43 -07004101 unsigned int flags)
Steven Rostedt77271ce2011-11-17 09:34:33 -05004102{
Joel Fernandes441dae82017-06-25 22:38:43 -07004103 bool tgid = flags & TRACE_ITER_RECORD_TGID;
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02004104 const char *space = " ";
4105 int prec = tgid ? 12 : 2;
Joel Fernandes441dae82017-06-25 22:38:43 -07004106
Quentin Perret9e738212019-02-14 15:29:50 +00004107 print_event_info(buf, m);
4108
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02004109 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
4110 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4111 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4112 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4113 seq_printf(m, "# %.*s||| / delay\n", prec, space);
4114 seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4115 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
Steven Rostedt77271ce2011-11-17 09:34:33 -05004116}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004117
Jiri Olsa62b915f2010-04-02 19:01:22 +02004118void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004119print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4120{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004121 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004122 struct array_buffer *buf = iter->array_buffer;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004123 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004124 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004125 unsigned long entries;
4126 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004127 const char *name = "preemption";
4128
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05004129 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004130
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004131 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004132
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004133 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004134 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004135 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004136 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004137 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004138 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02004139 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004140 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02004141 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004142 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004143#if defined(CONFIG_PREEMPT_NONE)
4144 "server",
4145#elif defined(CONFIG_PREEMPT_VOLUNTARY)
4146 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04004147#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004148 "preempt",
Sebastian Andrzej Siewior9c34fc42019-10-15 21:18:20 +02004149#elif defined(CONFIG_PREEMPT_RT)
4150 "preempt_rt",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004151#else
4152 "unknown",
4153#endif
4154 /* These are reserved for later use */
4155 0, 0, 0, 0);
4156#ifdef CONFIG_SMP
4157 seq_printf(m, " #P:%d)\n", num_online_cpus());
4158#else
4159 seq_puts(m, ")\n");
4160#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004161 seq_puts(m, "# -----------------\n");
4162 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004163 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07004164 data->comm, data->pid,
4165 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004166 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004167 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004168
4169 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004170 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02004171 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4172 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004173 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02004174 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4175 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04004176 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004177 }
4178
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004179 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004180}
4181
Steven Rostedta3097202008-11-07 22:36:02 -05004182static void test_cpu_buff_start(struct trace_iterator *iter)
4183{
4184 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004185 struct trace_array *tr = iter->tr;
Steven Rostedta3097202008-11-07 22:36:02 -05004186
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004187 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004188 return;
4189
4190 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4191 return;
4192
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07004193 if (cpumask_available(iter->started) &&
4194 cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05004195 return;
4196
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004197 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004198 return;
4199
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07004200 if (cpumask_available(iter->started))
Sasha Levin919cd972015-09-04 12:45:56 -04004201 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004202
4203	 /* Don't print the "CPU buffer started" banner for the first entry of the trace */
4204 if (iter->idx > 1)
4205 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4206 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05004207}
4208
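/*
 * The cpumask bookkeeping above is a once-per-CPU latch: announce a
 * CPU's buffer the first time one of its entries is printed, then stay
 * quiet. A userspace model with a plain bitmask standing in for the
 * cpumask API (illustrative sketch; assumes at most 64 CPUs):
 */
#include <stdbool.h>
#include <stdint.h>

static uint64_t announced;

static bool first_entry_on_cpu(int cpu)
{
	uint64_t bit = UINT64_C(1) << cpu;

	if (announced & bit)
		return false;		/* already announced */
	announced |= bit;
	return true;			/* first entry seen on this cpu */
}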
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004209static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004210{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004211 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02004212 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004213 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02004214 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004215 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004216
Ingo Molnar4e3c3332008-05-12 21:20:45 +02004217 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004218
Steven Rostedta3097202008-11-07 22:36:02 -05004219 test_cpu_buff_start(iter);
4220
Steven Rostedtf633cef2008-12-23 23:24:13 -05004221 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004222
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004223 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004224 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4225 trace_print_lat_context(iter);
4226 else
4227 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004228 }
4229
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004230 if (trace_seq_has_overflowed(s))
4231 return TRACE_TYPE_PARTIAL_LINE;
4232
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02004233 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04004234 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02004235
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004236 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04004237
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004238 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004239}
4240
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004241static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004242{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004243 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004244 struct trace_seq *s = &iter->seq;
4245 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004246 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004247
4248 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004249
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004250 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004251 trace_seq_printf(s, "%d %d %llu ",
4252 entry->pid, iter->cpu, iter->ts);
4253
4254 if (trace_seq_has_overflowed(s))
4255 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004256
Steven Rostedtf633cef2008-12-23 23:24:13 -05004257 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02004258 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04004259 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02004260
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004261 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04004262
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004263 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004264}
4265
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004266static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004267{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004268 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004269 struct trace_seq *s = &iter->seq;
4270 unsigned char newline = '\n';
4271 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004272 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004273
4274 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004275
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004276 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004277 SEQ_PUT_HEX_FIELD(s, entry->pid);
4278 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4279 SEQ_PUT_HEX_FIELD(s, iter->ts);
4280 if (trace_seq_has_overflowed(s))
4281 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004282 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004283
Steven Rostedtf633cef2008-12-23 23:24:13 -05004284 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02004285 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04004286 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02004287 if (ret != TRACE_TYPE_HANDLED)
4288 return ret;
4289 }
Steven Rostedt7104f302008-10-01 10:52:51 -04004290
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004291 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004292
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004293 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004294}
4295
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004296static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004297{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004298 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004299 struct trace_seq *s = &iter->seq;
4300 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004301 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004302
4303 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004304
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004305 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004306 SEQ_PUT_FIELD(s, entry->pid);
4307 SEQ_PUT_FIELD(s, iter->cpu);
4308 SEQ_PUT_FIELD(s, iter->ts);
4309 if (trace_seq_has_overflowed(s))
4310 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004311 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004312
Steven Rostedtf633cef2008-12-23 23:24:13 -05004313 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04004314 return event ? event->funcs->binary(iter, 0, event) :
4315 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004316}
4317
Jiri Olsa62b915f2010-04-02 19:01:22 +02004318int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004319{
Steven Rostedt6d158a82012-06-27 20:46:14 -04004320 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004321 int cpu;
4322
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004323 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05004324 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004325 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04004326 buf_iter = trace_buffer_iter(iter, cpu);
4327 if (buf_iter) {
4328 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004329 return 0;
4330 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004331 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004332 return 0;
4333 }
4334 return 1;
4335 }
4336
Steven Rostedtab464282008-05-12 21:21:00 +02004337 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04004338 buf_iter = trace_buffer_iter(iter, cpu);
4339 if (buf_iter) {
4340 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04004341 return 0;
4342 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004343 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04004344 return 0;
4345 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004346 }
Steven Rostedtd7690412008-10-01 00:29:53 -04004347
Frederic Weisbecker797d3712008-09-30 18:13:45 +02004348 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004349}
4350
Lai Jiangshan4f535962009-05-18 19:35:34 +08004351/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05004352enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004353{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004354 struct trace_array *tr = iter->tr;
4355 unsigned long trace_flags = tr->trace_flags;
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004356 enum print_line_t ret;
4357
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004358 if (iter->lost_events) {
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04004359 if (iter->lost_events == (unsigned long)-1)
4360 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4361 iter->cpu);
4362 else
4363 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4364 iter->cpu, iter->lost_events);
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004365 if (trace_seq_has_overflowed(&iter->seq))
4366 return TRACE_TYPE_PARTIAL_LINE;
4367 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04004368
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004369 if (iter->trace && iter->trace->print_line) {
4370 ret = iter->trace->print_line(iter);
4371 if (ret != TRACE_TYPE_UNHANDLED)
4372 return ret;
4373 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02004374
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05004375 if (iter->ent->type == TRACE_BPUTS &&
4376 trace_flags & TRACE_ITER_PRINTK &&
4377 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4378 return trace_print_bputs_msg_only(iter);
4379
Frederic Weisbecker48ead022009-03-12 18:24:49 +01004380 if (iter->ent->type == TRACE_BPRINT &&
4381 trace_flags & TRACE_ITER_PRINTK &&
4382 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04004383 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01004384
Frederic Weisbecker66896a82008-12-13 20:18:13 +01004385 if (iter->ent->type == TRACE_PRINT &&
4386 trace_flags & TRACE_ITER_PRINTK &&
4387 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04004388 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01004389
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004390 if (trace_flags & TRACE_ITER_BIN)
4391 return print_bin_fmt(iter);
4392
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004393 if (trace_flags & TRACE_ITER_HEX)
4394 return print_hex_fmt(iter);
4395
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004396 if (trace_flags & TRACE_ITER_RAW)
4397 return print_raw_fmt(iter);
4398
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004399 return print_trace_fmt(iter);
4400}
4401
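/*
 * Note the precedence implemented above: lost-event markers come first,
 * then the tracer's own print_line() hook, then the printk-msg-only
 * shortcuts (bputs/bprint/print), then the user-selected bin, hex and
 * raw encodings, and only then the default formatted output.
 */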
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004402void trace_latency_header(struct seq_file *m)
4403{
4404 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004405 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004406
4407 /* print nothing if the buffers are empty */
4408 if (trace_empty(iter))
4409 return;
4410
4411 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4412 print_trace_header(m, iter);
4413
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004414 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004415 print_lat_help_header(m);
4416}
4417
Jiri Olsa62b915f2010-04-02 19:01:22 +02004418void trace_default_header(struct seq_file *m)
4419{
4420 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004421 struct trace_array *tr = iter->tr;
4422 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02004423
Jiri Olsaf56e7f82011-06-03 16:58:49 +02004424 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4425 return;
4426
Jiri Olsa62b915f2010-04-02 19:01:22 +02004427 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4428 /* print nothing if the buffers are empty */
4429 if (trace_empty(iter))
4430 return;
4431 print_trace_header(m, iter);
4432 if (!(trace_flags & TRACE_ITER_VERBOSE))
4433 print_lat_help_header(m);
4434 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05004435 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4436 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004437 print_func_help_header_irq(iter->array_buffer,
Joel Fernandes441dae82017-06-25 22:38:43 -07004438 m, trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05004439 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004440 print_func_help_header(iter->array_buffer, m,
Joel Fernandes441dae82017-06-25 22:38:43 -07004441 trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05004442 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02004443 }
4444}
4445
Steven Rostedte0a413f2011-09-29 21:26:16 -04004446static void test_ftrace_alive(struct seq_file *m)
4447{
4448 if (!ftrace_is_dead())
4449 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004450 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4451 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04004452}
4453
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004454#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004455static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004456{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004457 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4458 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4459 "# Takes a snapshot of the main buffer.\n"
4460 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4461		   "# (Doesn't have to be '2'; works with any number that\n"
4462 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004463}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004464
4465static void show_snapshot_percpu_help(struct seq_file *m)
4466{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004467 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004468#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004469 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4470 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004471#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004472 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4473 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004474#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004475 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4476		   "# (Doesn't have to be '2'; works with any number that\n"
4477 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004478}
4479
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004480static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4481{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004482 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004483 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004484 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004485 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004486
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004487 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004488 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4489 show_snapshot_main_help(m);
4490 else
4491 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004492}
4493#else
4494/* Should never be called */
4495static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4496#endif
4497
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004498static int s_show(struct seq_file *m, void *v)
4499{
4500 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004501 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004502
4503 if (iter->ent == NULL) {
4504 if (iter->tr) {
4505 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4506 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04004507 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004508 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004509 if (iter->snapshot && trace_empty(iter))
4510 print_snapshot_help(m, iter);
4511 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004512 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02004513 else
4514 trace_default_header(m);
4515
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004516 } else if (iter->leftover) {
4517 /*
4518 * If we filled the seq_file buffer earlier, we
4519 * want to just show it now.
4520 */
4521 ret = trace_print_seq(m, &iter->seq);
4522
4523 /* ret should this time be zero, but you never know */
4524 iter->leftover = ret;
4525
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004526 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004527 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004528 ret = trace_print_seq(m, &iter->seq);
4529 /*
4530 * If we overflow the seq_file buffer, then it will
4531 * ask us for this data again at start up.
4532 * Use that instead.
4533	 * ret is 0 if the seq_file write succeeded,
4534	 * -1 otherwise.
4535 */
4536 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004537 }
4538
4539 return 0;
4540}
4541
Oleg Nesterov649e9c702013-07-23 17:25:54 +02004542/*
4543	 * Should be used after trace_array_get(); trace_types_lock
4544	 * ensures that i_cdev was already initialized.
4545 */
4546static inline int tracing_get_cpu(struct inode *inode)
4547{
4548 if (inode->i_cdev) /* See trace_create_cpu_file() */
4549 return (long)inode->i_cdev - 1;
4550 return RING_BUFFER_ALL_CPUS;
4551}
4552
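/*
 * A sketch of the encoding the helper above undoes (the store side is
 * trace_create_cpu_file(), later in this file): the CPU number is
 * biased by one before being stashed in i_cdev, so a NULL i_cdev --
 * the top-level, all-CPU files -- stays distinguishable from CPU 0.
 */
static inline void *tracing_encode_cpu(long cpu)
{
	return (void *)(cpu + 1);	/* CPU 0 is stored as 1, never NULL */
}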
James Morris88e9d342009-09-22 16:43:43 -07004553static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004554 .start = s_start,
4555 .next = s_next,
4556 .stop = s_stop,
4557 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004558};
4559
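/*
 * For orientation, the seq_file core drives the ops above roughly as
 * (simplified; the real loop in fs/seq_file.c also restarts at the
 * saved position when the output buffer overflows):
 *
 *	p = start(m, &pos);
 *	while (p && !IS_ERR(p)) {
 *		error = show(m, p);
 *		p = next(m, p, &pos);
 *	}
 *	stop(m, p);
 */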
Ingo Molnare309b412008-05-12 21:20:51 +02004560static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02004561__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004562{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004563 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004564 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02004565 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004566
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004567 if (tracing_disabled)
4568 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02004569
Jiri Olsa50e18b92012-04-25 10:23:39 +02004570 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004571 if (!iter)
4572 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004573
Gil Fruchter72917232015-06-09 10:32:35 +03004574 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
Steven Rostedt6d158a82012-06-27 20:46:14 -04004575 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004576 if (!iter->buffer_iter)
4577 goto release;
4578
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004579 /*
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004580 * trace_find_next_entry() may need to save off iter->ent.
4581	 * It will place it into the iter->temp buffer. As most events
4582	 * are less than 128 bytes, allocate a buffer of that size.
4583 * If one is greater, then trace_find_next_entry() will
4584 * allocate a new buffer to adjust for the bigger iter->ent.
4585 * It's not critical if it fails to get allocated here.
4586 */
4587 iter->temp = kmalloc(128, GFP_KERNEL);
4588 if (iter->temp)
4589 iter->temp_size = 128;
4590
4591 /*
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09004592	 * trace_event_printf() may need to modify the given format
4593	 * string to replace %p with %px so that it shows the real
4594	 * address instead of a hash value. However, only event tracing
4595	 * needs this; other tracers may not. Defer the allocation
4596	 * until it is needed.
4597 */
4598 iter->fmt = NULL;
4599 iter->fmt_size = 0;
4600
4601 /*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004602 * We make a copy of the current tracer to avoid concurrent
4603 * changes on it while we are reading.
4604 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004605 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004606 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004607 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004608 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004609
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004610 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004611
Li Zefan79f55992009-06-15 14:58:26 +08004612 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004613 goto fail;
4614
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004615 iter->tr = tr;
4616
4617#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004618 /* Currently only the top directory has a snapshot */
4619 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004620 iter->array_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004621 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004622#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004623 iter->array_buffer = &tr->array_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004624 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004625 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02004626 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004627 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004628
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004629	 /* Notify the tracer early, before we stop tracing. */
Dan Carpenterb3f7a6c2014-11-22 21:30:12 +03004630 if (iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01004631 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004632
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004633 /* Annotate start of buffers if we had overruns */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004634 if (ring_buffer_overruns(iter->array_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004635 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4636
David Sharp8be07092012-11-13 12:18:22 -08004637 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004638 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004639 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4640
Steven Rostedt (VMware)06e0a542020-03-17 17:32:31 -04004641 /*
4642 * If pause-on-trace is enabled, then stop the trace while
4643	 * dumping, unless this is the "snapshot" file.
4644 */
4645 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004646 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004647
Steven Rostedtae3b5092013-01-23 15:22:59 -05004648 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004649 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004650 iter->buffer_iter[cpu] =
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004651 ring_buffer_read_prepare(iter->array_buffer->buffer,
Douglas Anderson31b265b2019-03-08 11:32:04 -08004652 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004653 }
4654 ring_buffer_read_prepare_sync();
4655 for_each_tracing_cpu(cpu) {
4656 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004657 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004658 }
4659 } else {
4660 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004661 iter->buffer_iter[cpu] =
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004662 ring_buffer_read_prepare(iter->array_buffer->buffer,
Douglas Anderson31b265b2019-03-08 11:32:04 -08004663 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004664 ring_buffer_read_prepare_sync();
4665 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004666 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004667 }
4668
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004669 mutex_unlock(&trace_types_lock);
4670
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004671 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004672
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004673 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004674 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004675 kfree(iter->trace);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004676 kfree(iter->temp);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004677 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004678release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02004679 seq_release_private(inode, file);
4680 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004681}
4682
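/*
 * The fail/release unwind above is the usual kernel idiom: each error
 * label releases exactly what had been set up before the jump, in
 * reverse order of acquisition. A minimal userspace model of the same
 * shape (illustrative sketch):
 */
#include <stdlib.h>

static int alloc_pair(void **pa, void **pb)
{
	void *a, *b;

	a = malloc(32);
	if (!a)
		return -1;

	b = malloc(32);
	if (!b)
		goto fail_free_a;

	*pa = a;
	*pb = b;
	return 0;

fail_free_a:
	free(a);
	return -1;
}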
4683int tracing_open_generic(struct inode *inode, struct file *filp)
4684{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004685 int ret;
4686
4687 ret = tracing_check_open_get_tr(NULL);
4688 if (ret)
4689 return ret;
Steven Rostedt60a11772008-05-12 21:20:44 +02004690
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004691 filp->private_data = inode->i_private;
4692 return 0;
4693}
4694
Geyslan G. Bem2e864212013-10-18 21:15:54 -03004695bool tracing_is_disabled(void)
4696{
4697	return tracing_disabled ? true : false;
4698}
4699
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004700/*
4701 * Open and update trace_array ref count.
4702 * Must have the current trace_array passed to it.
4703 */
Steven Rostedt (VMware)aa07d712019-10-11 19:12:21 -04004704int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004705{
4706 struct trace_array *tr = inode->i_private;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004707 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004708
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004709 ret = tracing_check_open_get_tr(tr);
4710 if (ret)
4711 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004712
4713 filp->private_data = inode->i_private;
4714
4715 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004716}
4717
Hannes Eder4fd27352009-02-10 19:44:12 +01004718static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004719{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004720 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07004721 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004722 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004723 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004724
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004725 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004726 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004727 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004728 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004729
Oleg Nesterov6484c712013-07-23 17:26:10 +02004730 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004731 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004732 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05004733
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004734 for_each_tracing_cpu(cpu) {
4735 if (iter->buffer_iter[cpu])
4736 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4737 }
4738
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004739 if (iter->trace && iter->trace->close)
4740 iter->trace->close(iter);
4741
Steven Rostedt (VMware)06e0a542020-03-17 17:32:31 -04004742 if (!iter->snapshot && tr->stop_count)
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004743 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004744 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004745
4746 __trace_array_put(tr);
4747
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004748 mutex_unlock(&trace_types_lock);
4749
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004750 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004751 free_cpumask_var(iter->started);
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09004752 kfree(iter->fmt);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004753 kfree(iter->temp);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004754 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004755 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02004756 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004757
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004758 return 0;
4759}
4760
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004761static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4762{
4763 struct trace_array *tr = inode->i_private;
4764
4765 trace_array_put(tr);
4766 return 0;
4767}
4768
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004769static int tracing_single_release_tr(struct inode *inode, struct file *file)
4770{
4771 struct trace_array *tr = inode->i_private;
4772
4773 trace_array_put(tr);
4774
4775 return single_release(inode, file);
4776}
4777
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004778static int tracing_open(struct inode *inode, struct file *file)
4779{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004780 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004781 struct trace_iterator *iter;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004782 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004783
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004784 ret = tracing_check_open_get_tr(tr);
4785 if (ret)
4786 return ret;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004787
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004788 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02004789 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4790 int cpu = tracing_get_cpu(inode);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004791 struct array_buffer *trace_buf = &tr->array_buffer;
Bo Yan8dd33bc2017-09-18 10:03:35 -07004792
4793#ifdef CONFIG_TRACER_MAX_TRACE
4794 if (tr->current_trace->print_max)
4795 trace_buf = &tr->max_buffer;
4796#endif
Oleg Nesterov6484c712013-07-23 17:26:10 +02004797
4798 if (cpu == RING_BUFFER_ALL_CPUS)
Bo Yan8dd33bc2017-09-18 10:03:35 -07004799 tracing_reset_online_cpus(trace_buf);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004800 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04004801 tracing_reset_cpu(trace_buf, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004802 }
4803
4804 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004805 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004806 if (IS_ERR(iter))
4807 ret = PTR_ERR(iter);
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004808 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004809 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4810 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004811
4812 if (ret < 0)
4813 trace_array_put(tr);
4814
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004815 return ret;
4816}
4817
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004818/*
4819 * Some tracers are not suitable for instance buffers.
4820 * A tracer is always available for the global array (toplevel)
4821 * or if it explicitly states that it is.
4822 */
4823static bool
4824trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4825{
4826 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4827}
4828
4829/* Find the next tracer that this trace array may use */
4830static struct tracer *
4831get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4832{
4833 while (t && !trace_ok_for_array(t, tr))
4834 t = t->next;
4835
4836 return t;
4837}
4838
Ingo Molnare309b412008-05-12 21:20:51 +02004839static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004840t_next(struct seq_file *m, void *v, loff_t *pos)
4841{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004842 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004843 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004844
4845 (*pos)++;
4846
4847 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004848 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004849
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004850 return t;
4851}
4852
4853static void *t_start(struct seq_file *m, loff_t *pos)
4854{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004855 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004856 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004857 loff_t l = 0;
4858
4859 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004860
4861 t = get_tracer_for_array(tr, trace_types);
4862 for (; t && l < *pos; t = t_next(m, t, &l))
4863 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004864
4865 return t;
4866}
4867
4868static void t_stop(struct seq_file *m, void *p)
4869{
4870 mutex_unlock(&trace_types_lock);
4871}
4872
4873static int t_show(struct seq_file *m, void *v)
4874{
4875 struct tracer *t = v;
4876
4877 if (!t)
4878 return 0;
4879
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004880 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004881 if (t->next)
4882 seq_putc(m, ' ');
4883 else
4884 seq_putc(m, '\n');
4885
4886 return 0;
4887}
4888
James Morris88e9d342009-09-22 16:43:43 -07004889static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004890 .start = t_start,
4891 .next = t_next,
4892 .stop = t_stop,
4893 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004894};
4895
4896static int show_traces_open(struct inode *inode, struct file *file)
4897{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004898 struct trace_array *tr = inode->i_private;
4899 struct seq_file *m;
4900 int ret;
4901
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004902 ret = tracing_check_open_get_tr(tr);
4903 if (ret)
4904 return ret;
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004905
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004906 ret = seq_open(file, &show_traces_seq_ops);
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004907 if (ret) {
4908 trace_array_put(tr);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004909 return ret;
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004910 }
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004911
4912 m = file->private_data;
4913 m->private = tr;
4914
4915 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004916}
4917
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004918static int show_traces_release(struct inode *inode, struct file *file)
4919{
4920 struct trace_array *tr = inode->i_private;
4921
4922 trace_array_put(tr);
4923 return seq_release(inode, file);
4924}
4925
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004926static ssize_t
4927tracing_write_stub(struct file *filp, const char __user *ubuf,
4928 size_t count, loff_t *ppos)
4929{
4930 return count;
4931}
4932
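/*
 * tracing_lseek() only allows real seeking when the file was opened
 * for reading (and is therefore seq_file backed); a write-only open
 * keeps f_pos pinned at 0, since there is nothing meaningful to seek
 * over.
 */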
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004933loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08004934{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004935 int ret;
4936
Slava Pestov364829b2010-11-24 15:13:16 -08004937 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004938 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08004939 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004940 file->f_pos = ret = 0;
4941
4942 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08004943}
4944
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004945static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004946 .open = tracing_open,
4947 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004948 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05004949 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004950 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004951};
4952
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004953static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02004954 .open = show_traces_open,
4955 .read = seq_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004956 .llseek = seq_lseek,
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04004957 .release = show_traces_release,
Ingo Molnarc7078de2008-05-12 21:20:52 +02004958};
4959
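/*
 * tracing_cpumask_read() uses the kernel's "%*pb" bitmap format
 * specifier with cpumask_pr_args(): the first snprintf(NULL, 0, ...)
 * pass only computes the required length so the buffer can be sized
 * exactly. As a sketch, on a 4-CPU machine with all CPUs traced the
 * read would return the hex mask "f".
 */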
4960static ssize_t
4961tracing_cpumask_read(struct file *filp, char __user *ubuf,
4962 size_t count, loff_t *ppos)
4963{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07004964 struct trace_array *tr = file_inode(filp)->i_private;
Changbin Du90e406f2017-11-30 11:39:43 +08004965 char *mask_str;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004966 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02004967
Changbin Du90e406f2017-11-30 11:39:43 +08004968 len = snprintf(NULL, 0, "%*pb\n",
4969 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4970 mask_str = kmalloc(len, GFP_KERNEL);
4971 if (!mask_str)
4972 return -ENOMEM;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004973
Changbin Du90e406f2017-11-30 11:39:43 +08004974 len = snprintf(mask_str, len, "%*pb\n",
Tejun Heo1a402432015-02-13 14:37:39 -08004975 cpumask_pr_args(tr->tracing_cpumask));
4976 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004977 count = -EINVAL;
4978 goto out_err;
4979 }
Changbin Du90e406f2017-11-30 11:39:43 +08004980 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
Ingo Molnar36dfe922008-05-12 21:20:52 +02004981
4982out_err:
Changbin Du90e406f2017-11-30 11:39:43 +08004983 kfree(mask_str);
Ingo Molnarc7078de2008-05-12 21:20:52 +02004984
4985 return count;
4986}
4987
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004988int tracing_set_cpumask(struct trace_array *tr,
4989 cpumask_var_t tracing_cpumask_new)
Ingo Molnarc7078de2008-05-12 21:20:52 +02004990{
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004991 int cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10304992
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09004993 if (!tr)
4994 return -EINVAL;
Ingo Molnar36dfe922008-05-12 21:20:52 +02004995
Steven Rostedta5e25882008-12-02 15:34:05 -05004996 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05004997 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02004998 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02004999 /*
5000 * Increase/decrease the disabled counter if we are
5001 * about to flip a bit in the cpumask:
5002 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07005003 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10305004 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005005 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5006 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02005007 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07005008 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10305009 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005010 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5011 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02005012 }
5013 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05005014 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05005015 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02005016
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07005017 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09005018
5019 return 0;
5020}
5021
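/*
 * Writes to "tracing_cpumask" are parsed as a hex CPU mask by
 * cpumask_parse_user(). For example (illustrative):
 *
 *   # echo 3 > tracing_cpumask
 *
 * would limit tracing to CPUs 0 and 1, disabling the ring buffers of
 * all other CPUs via tracing_set_cpumask() above.
 */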
5022static ssize_t
5023tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5024 size_t count, loff_t *ppos)
5025{
5026 struct trace_array *tr = file_inode(filp)->i_private;
5027 cpumask_var_t tracing_cpumask_new;
5028 int err;
5029
5030 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5031 return -ENOMEM;
5032
5033 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5034 if (err)
5035 goto err_free;
5036
5037 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5038 if (err)
5039 goto err_free;
5040
Rusty Russell9e01c1b2009-01-01 10:12:22 +10305041 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02005042
Ingo Molnarc7078de2008-05-12 21:20:52 +02005043 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02005044
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09005045err_free:
Li Zefan215368e2009-06-15 10:56:42 +08005046 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02005047
5048 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02005049}
5050
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005051static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07005052 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02005053 .read = tracing_cpumask_read,
5054 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07005055 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005056 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005057};
5058
Li Zefanfdb372e2009-12-08 11:15:59 +08005059static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005060{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05005061 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005062 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05005063 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05005064 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005065
Steven Rostedtd8e83d22009-02-26 23:55:58 -05005066 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005067 tracer_flags = tr->current_trace->flags->val;
5068 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05005069
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005070 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005071 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08005072 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005073 else
Li Zefanfdb372e2009-12-08 11:15:59 +08005074 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005075 }
5076
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005077 for (i = 0; trace_opts[i].name; i++) {
5078 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08005079 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005080 else
Li Zefanfdb372e2009-12-08 11:15:59 +08005081 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005082 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05005083 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005084
Li Zefanfdb372e2009-12-08 11:15:59 +08005085 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005086}
5087
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005088static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08005089 struct tracer_flags *tracer_flags,
5090 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005091{
Chunyu Hud39cdd22016-03-08 21:37:01 +08005092 struct tracer *trace = tracer_flags->trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08005093 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005094
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005095 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005096 if (ret)
5097 return ret;
5098
5099 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08005100 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005101 else
Zhaolei77708412009-08-07 18:53:21 +08005102 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005103 return 0;
5104}
5105
Li Zefan8d18eaa2009-12-08 11:17:06 +08005106/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005107static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08005108{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005109 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08005110 struct tracer_flags *tracer_flags = trace->flags;
5111 struct tracer_opt *opts = NULL;
5112 int i;
5113
5114 for (i = 0; tracer_flags->opts[i].name; i++) {
5115 opts = &tracer_flags->opts[i];
5116
5117 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005118 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08005119 }
5120
5121 return -EINVAL;
5122}
5123
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005124/* Some tracers require overwrite to stay enabled */
5125int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5126{
5127 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5128 return -1;
5129
5130 return 0;
5131}
5132
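/*
 * set_tracer_flag() is the single point where a trace option bit is
 * flipped: it first lets the current tracer veto the change via
 * ->flag_changed(), then updates tr->trace_flags and applies any side
 * effects a particular flag carries (cmdline/tgid recording, fork
 * following, ring buffer overwrite mode, trace_printk).
 */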
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005133int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04005134{
Prateek Sood3a53acf2019-12-10 09:15:16 +00005135 if ((mask == TRACE_ITER_RECORD_TGID) ||
5136 (mask == TRACE_ITER_RECORD_CMD))
5137 lockdep_assert_held(&event_mutex);
5138
Steven Rostedtaf4617b2009-03-17 18:09:55 -04005139 /* do nothing if flag is already set */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005140 if (!!(tr->trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005141 return 0;
5142
5143 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005144 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05005145 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005146 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04005147
5148 if (enabled)
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005149 tr->trace_flags |= mask;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04005150 else
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005151 tr->trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08005152
5153 if (mask == TRACE_ITER_RECORD_CMD)
5154 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08005155
Joel Fernandesd914ba32017-06-26 19:01:55 -07005156 if (mask == TRACE_ITER_RECORD_TGID) {
5157 if (!tgid_map)
Yuming Han6ee40512019-10-24 11:34:30 +08005158 tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
Kees Cook6396bb22018-06-12 14:03:40 -07005159 sizeof(*tgid_map),
Joel Fernandesd914ba32017-06-26 19:01:55 -07005160 GFP_KERNEL);
5161 if (!tgid_map) {
5162 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5163 return -ENOMEM;
5164 }
5165
5166 trace_event_enable_tgid_record(enabled);
5167 }
5168
Steven Rostedtc37775d2016-04-13 16:59:18 -04005169 if (mask == TRACE_ITER_EVENT_FORK)
5170 trace_event_follow_fork(tr, enabled);
5171
Namhyung Kim1e104862017-04-17 11:44:28 +09005172 if (mask == TRACE_ITER_FUNC_FORK)
5173 ftrace_pid_follow_fork(tr, enabled);
5174
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04005175 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005176 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04005177#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005178 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04005179#endif
5180 }
Steven Rostedt81698832012-10-11 10:15:05 -04005181
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04005182 if (mask == TRACE_ITER_PRINTK) {
Steven Rostedt81698832012-10-11 10:15:05 -04005183 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04005184 trace_printk_control(enabled);
5185 }
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005186
5187 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04005188}
5189
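/*
 * trace_set_options() takes one option string as written to the
 * "trace_options" file. A leading "no" clears the flag instead of
 * setting it; anything that does not match a core option in
 * trace_options[] is offered to the current tracer as a
 * tracer-specific option. For example (illustrative):
 *
 *   # echo noprint-parent > trace_options
 */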
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09005190int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005191{
Li Zefan8d18eaa2009-12-08 11:17:06 +08005192 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005193 int neg = 0;
Yisheng Xie591a0332018-05-17 16:36:03 +08005194 int ret;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08005195 size_t orig_len = strlen(option);
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05005196 int len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005197
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005198 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005199
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05005200 len = str_has_prefix(cmp, "no");
5201 if (len)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005202 neg = 1;
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05005203
5204 cmp += len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005205
Prateek Sood3a53acf2019-12-10 09:15:16 +00005206 mutex_lock(&event_mutex);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005207 mutex_lock(&trace_types_lock);
5208
Yisheng Xie591a0332018-05-17 16:36:03 +08005209 ret = match_string(trace_options, -1, cmp);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005210 /* If no option could be set, test the specific tracer options */
Yisheng Xie591a0332018-05-17 16:36:03 +08005211 if (ret < 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005212 ret = set_tracer_option(tr, cmp, neg);
Yisheng Xie591a0332018-05-17 16:36:03 +08005213 else
5214 ret = set_tracer_flag(tr, 1 << ret, !neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005215
5216 mutex_unlock(&trace_types_lock);
Prateek Sood3a53acf2019-12-10 09:15:16 +00005217 mutex_unlock(&event_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005218
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08005219 /*
5220 * If the first trailing whitespace is replaced with '\0' by strstrip,
5221 * turn it back into a space.
5222 */
5223 if (orig_len > strlen(option))
5224 option[strlen(option)] = ' ';
5225
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005226 return ret;
5227}
5228
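/*
 * Apply options passed on the kernel command line via the
 * "trace_options=" parameter, a comma-separated list saved into
 * trace_boot_options_buf at early boot, e.g. (illustrative):
 *
 *   trace_options=sym-addr,stacktrace
 */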
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08005229static void __init apply_trace_boot_options(void)
5230{
5231 char *buf = trace_boot_options_buf;
5232 char *option;
5233
5234 while (true) {
5235 option = strsep(&buf, ",");
5236
5237 if (!option)
5238 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08005239
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05005240 if (*option)
5241 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08005242
5243 /* Put back the comma to allow this to be called again */
5244 if (buf)
5245 *(buf - 1) = ',';
5246 }
5247}
5248
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005249static ssize_t
5250tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5251 size_t cnt, loff_t *ppos)
5252{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005253 struct seq_file *m = filp->private_data;
5254 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005255 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005256 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005257
5258 if (cnt >= sizeof(buf))
5259 return -EINVAL;
5260
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08005261 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005262 return -EFAULT;
5263
Steven Rostedta8dd2172013-01-09 20:54:17 -05005264 buf[cnt] = 0;
5265
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005266 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005267 if (ret < 0)
5268 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005269
Jiri Olsacf8517c2009-10-23 19:36:16 -04005270 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005271
5272 return cnt;
5273}
5274
Li Zefanfdb372e2009-12-08 11:15:59 +08005275static int tracing_trace_options_open(struct inode *inode, struct file *file)
5276{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005277 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005278 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005279
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005280 ret = tracing_check_open_get_tr(tr);
5281 if (ret)
5282 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005283
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005284 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5285 if (ret < 0)
5286 trace_array_put(tr);
5287
5288 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08005289}
5290
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005291static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08005292 .open = tracing_trace_options_open,
5293 .read = seq_read,
5294 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005295 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05005296 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005297};
5298
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005299static const char readme_msg[] =
5300 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005301 "# echo 0 > tracing_on : quick way to disable tracing\n"
5302 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5303 " Important files:\n"
5304 " trace\t\t\t- The static contents of the buffer\n"
5305 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5306 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5307 " current_tracer\t- function and latency tracers\n"
5308 " available_tracers\t- list of configured tracers for current_tracer\n"
Tom Zanussia8d65572019-03-31 18:48:25 -05005309 " error_log\t- error log for failed commands (that support it)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005310 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5311 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5312 "  trace_clock\t\t- change the clock used to order events\n"
5313 " local: Per cpu clock but may not be synced across CPUs\n"
5314 " global: Synced across CPUs but slows tracing down.\n"
5315 " counter: Not a clock, but just an increment\n"
5316 " uptime: Jiffy counter from time of boot\n"
5317 " perf: Same clock that perf events use\n"
5318#ifdef CONFIG_X86_64
5319 " x86-tsc: TSC cycle counter\n"
5320#endif
Tom Zanussi2c1ea602018-01-15 20:51:41 -06005321 "\n  timestamp_mode\t- view the mode used to timestamp events\n"
5322 " delta: Delta difference against a buffer-wide timestamp\n"
5323 " absolute: Absolute (standalone) timestamp\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005324 "\n  trace_marker\t\t- Writing into this file writes into the kernel buffer\n"
Steven Rostedtfa32e852016-07-06 15:25:08 -04005325 "\n  trace_marker_raw\t\t- Writing into this file writes binary data into the kernel buffer\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005326 " tracing_cpumask\t- Limit which CPUs to trace\n"
5327 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5328 "\t\t\t Remove sub-buffer with rmdir\n"
5329 " trace_options\t\t- Set format or modify how tracing happens\n"
Srivatsa S. Bhat (VMware)b9416992019-01-28 17:55:53 -08005330 "\t\t\t Disable an option by prefixing 'no' to the\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005331 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005332 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005333#ifdef CONFIG_DYNAMIC_FTRACE
5334 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005335 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5336 "\t\t\t functions\n"
Masami Hiramatsu60f1d5e2016-10-05 20:58:15 +09005337 "\t accepts: func_full_name or glob-matching-pattern\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005338 "\t modules: Can select a group via module\n"
5339 "\t Format: :mod:<module-name>\n"
5340 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5341 "\t triggers: a command to perform when function is hit\n"
5342 "\t Format: <function>:<trigger>[:count]\n"
5343 "\t trigger: traceon, traceoff\n"
5344 "\t\t enable_event:<system>:<event>\n"
5345 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005346#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005347 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005348#endif
5349#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005350 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005351#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04005352 "\t\t dump\n"
5353 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005354 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5355 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5356 "\t The first one will disable tracing every time do_fault is hit\n"
5357 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5358 "\t    The first time do_trap is hit and it disables tracing, the\n"
5359 "\t counter will decrement to 2. If tracing is already disabled,\n"
5360 "\t the counter will not decrement. It only decrements when the\n"
5361 "\t trigger did work\n"
5362 "\t To remove trigger without count:\n"
5363 "\t   echo '!<function>:<trigger>' > set_ftrace_filter\n"
5364 "\t To remove trigger with a count:\n"
5365 "\t   echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005366 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005367 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5368 "\t modules: Can select a group via module command :mod:\n"
5369 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005370#endif /* CONFIG_DYNAMIC_FTRACE */
5371#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005372 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5373 "\t\t (function)\n"
Steven Rostedt (VMware)b3b1e6e2020-03-19 23:19:06 -04005374 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5375 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005376#endif
5377#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5378 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09005379 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005380 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5381#endif
5382#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005383 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5384 "\t\t\t snapshot buffer. Read the contents for more\n"
5385 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005386#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08005387#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005388 " stack_trace\t\t- Shows the max stack trace when active\n"
5389 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005390 "\t\t\t Write into this file to reset the max size (trigger a\n"
5391 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005392#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005393 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5394 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005395#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08005396#endif /* CONFIG_STACK_TRACER */
Masami Hiramatsu5448d442018-11-05 18:02:08 +09005397#ifdef CONFIG_DYNAMIC_EVENTS
Masami Hiramatsuca89bc02019-06-20 00:07:49 +09005398 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
Masami Hiramatsu5448d442018-11-05 18:02:08 +09005399 "\t\t\t Write into this file to define/undefine new trace events.\n"
5400#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005401#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsuca89bc02019-06-20 00:07:49 +09005402 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005403 "\t\t\t Write into this file to define/undefine new trace events.\n"
5404#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005405#ifdef CONFIG_UPROBE_EVENTS
Masami Hiramatsu41af3cf2019-06-20 00:07:58 +09005406 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005407 "\t\t\t Write into this file to define/undefine new trace events.\n"
5408#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005409#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
Masami Hiramatsu86425622016-08-18 17:58:15 +09005410 "\t accepts: event-definitions (one definition per line)\n"
Masami Hiramatsuc3ca46e2017-05-23 15:05:50 +09005411 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5412 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
Masami Hiramatsu7bbab382018-11-05 18:03:33 +09005413#ifdef CONFIG_HIST_TRIGGERS
5414 "\t s:[synthetic/]<event> <field> [<field>]\n"
5415#endif
Masami Hiramatsu86425622016-08-18 17:58:15 +09005416 "\t -:[<group>/]<event>\n"
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005417#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsu86425622016-08-18 17:58:15 +09005418 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
Masami Hiramatsu4725cd82020-09-10 17:55:35 +09005419 "\t place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005420#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005421#ifdef CONFIG_UPROBE_EVENTS
Masami Hiramatsu3dd3aae2020-09-10 17:55:46 +09005422 "\t place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005423#endif
5424 "\t args: <name>=fetcharg[:type]\n"
5425 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09005426#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
Masami Hiramatsue65f7ae2019-05-15 14:38:42 +09005427 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09005428#else
Masami Hiramatsue65f7ae2019-05-15 14:38:42 +09005429 "\t $stack<index>, $stack, $retval, $comm,\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09005430#endif
Masami Hiramatsua42e3c42019-06-20 00:08:37 +09005431 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
Masami Hiramatsu60c2e0c2018-04-25 21:20:28 +09005432 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
Masami Hiramatsu88903c42019-05-15 14:38:30 +09005433 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
Masami Hiramatsu40b53b72018-04-25 21:21:55 +09005434 "\t <type>\\[<array-size>\\]\n"
Masami Hiramatsu7bbab382018-11-05 18:03:33 +09005435#ifdef CONFIG_HIST_TRIGGERS
5436 "\t field: <stype> <name>;\n"
5437 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5438 "\t [unsigned] char/int/long\n"
5439#endif
Masami Hiramatsu86425622016-08-18 17:58:15 +09005440#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06005441 " events/\t\t- Directory containing all trace event subsystems:\n"
5442 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5443 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005444 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5445 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005446 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005447 " events/<system>/<event>/\t- Directory containing control files for\n"
5448 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005449 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5450 " filter\t\t- If set, only events passing filter are traced\n"
5451 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005452 "\t Format: <trigger>[:count][if <filter>]\n"
5453 "\t trigger: traceon, traceoff\n"
5454 "\t enable_event:<system>:<event>\n"
5455 "\t disable_event:<system>:<event>\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06005456#ifdef CONFIG_HIST_TRIGGERS
5457 "\t enable_hist:<system>:<event>\n"
5458 "\t disable_hist:<system>:<event>\n"
5459#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06005460#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005461 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005462#endif
5463#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005464 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005465#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005466#ifdef CONFIG_HIST_TRIGGERS
5467 "\t\t hist (see below)\n"
5468#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005469 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5470 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5471 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5472 "\t events/block/block_unplug/trigger\n"
5473 "\t The first disables tracing every time block_unplug is hit.\n"
5474 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5475 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5476 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5477 "\t Like function triggers, the counter is only decremented if it\n"
5478 "\t enabled or disabled tracing.\n"
5479 "\t To remove a trigger without a count:\n"
5480 "\t   echo '!<trigger>' > <system>/<event>/trigger\n"
5481 "\t To remove a trigger with a count:\n"
5482 "\t   echo '!<trigger>:0' > <system>/<event>/trigger\n"
5483 "\t Filters can be ignored when removing a trigger.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005484#ifdef CONFIG_HIST_TRIGGERS
5485 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
Tom Zanussi76a3b0c2016-03-03 12:54:44 -06005486 "\t Format: hist:keys=<field1[,field2,...]>\n"
Tom Zanussif2606832016-03-03 12:54:43 -06005487 "\t [:values=<field1[,field2,...]>]\n"
Tom Zanussie62347d2016-03-03 12:54:45 -06005488 "\t [:sort=<field1[,field2,...]>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005489 "\t [:size=#entries]\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06005490 "\t [:pause][:continue][:clear]\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005491 "\t [:name=histname1]\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005492 "\t [:<handler>.<action>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005493 "\t [if <filter>]\n\n"
5494 "\t When a matching event is hit, an entry is added to a hash\n"
Tom Zanussif2606832016-03-03 12:54:43 -06005495 "\t table using the key(s) and value(s) named, and the value of a\n"
5496 "\t sum called 'hitcount' is incremented. Keys and values\n"
5497 "\t correspond to fields in the event's format description. Keys\n"
Tom Zanussi69a02002016-03-03 12:54:52 -06005498 "\t can be any field, or the special string 'stacktrace'.\n"
5499 "\t Compound keys consisting of up to two fields can be specified\n"
5500 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5501 "\t fields. Sort keys consisting of up to two fields can be\n"
5502 "\t specified using the 'sort' keyword. The sort direction can\n"
5503 "\t be modified by appending '.descending' or '.ascending' to a\n"
5504 "\t sort field. The 'size' parameter can be used to specify more\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005505 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5506 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5507 "\t its histogram data will be shared with other triggers of the\n"
5508 "\t same name, and trigger hits will update this common data.\n\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005509 "\t Reading the 'hist' file for the event will dump the hash\n"
Tom Zanussi52a7f162016-03-03 12:54:57 -06005510 "\t table in its entirety to stdout. If there are multiple hist\n"
5511 "\t triggers attached to an event, there will be a table for each\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005512 "\t trigger in the output. The table displayed for a named\n"
5513 "\t trigger will be the same as any other instance having the\n"
5514 "\t same name. The default format used to display a given field\n"
5515 "\t can be modified by appending any of the following modifiers\n"
5516 "\t to the field name, as applicable:\n\n"
Tom Zanussic6afad42016-03-03 12:54:49 -06005517 "\t .hex display a number as a hex value\n"
5518 "\t .sym display an address as a symbol\n"
Tom Zanussi6b4827a2016-03-03 12:54:50 -06005519 "\t .sym-offset display an address as a symbol and offset\n"
Tom Zanussi31696192016-03-03 12:54:51 -06005520 "\t .execname display a common_pid as a program name\n"
Tom Zanussi860f9f62018-01-15 20:51:48 -06005521 "\t .syscall display a syscall id as a syscall name\n"
5522 "\t .log2 display log2 value rather than raw number\n"
5523 "\t .usecs display a common_timestamp in microseconds\n\n"
Tom Zanussi83e99912016-03-03 12:54:46 -06005524 "\t The 'pause' parameter can be used to pause an existing hist\n"
5525 "\t trigger or to start a hist trigger but not log any events\n"
5526 "\t until told to do so. 'continue' can be used to start or\n"
5527 "\t restart a paused hist trigger.\n\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06005528 "\t The 'clear' parameter will clear the contents of a running\n"
5529 "\t hist trigger and leave its current paused/active state\n"
5530 "\t unchanged.\n\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06005531 "\t The enable_hist and disable_hist triggers can be used to\n"
5532 "\t have one event conditionally start and stop another event's\n"
Colin Ian King9e5a36a2019-02-17 22:32:22 +00005533 "\t already-attached hist trigger. The syntax is analogous to\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005534 "\t the enable_event and disable_event triggers.\n\n"
5535 "\t Hist trigger handlers and actions are executed whenever a\n"
5536 "\t    histogram entry is added or updated. They take the form:\n\n"
5537 "\t <handler>.<action>\n\n"
5538 "\t The available handlers are:\n\n"
5539 "\t onmatch(matching.event) - invoke on addition or update\n"
Tom Zanussidff81f52019-02-13 17:42:48 -06005540 "\t onmax(var) - invoke if var exceeds current max\n"
5541 "\t onchange(var) - invoke action if var changes\n\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005542 "\t The available actions are:\n\n"
Tom Zanussie91eefd72019-02-13 17:42:50 -06005543 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005544 "\t save(field,...) - save current event fields\n"
Tom Zanussia3785b72019-02-13 17:42:46 -06005545#ifdef CONFIG_TRACER_SNAPSHOT
Tom Zanussi1bc36bd2020-10-04 17:14:07 -05005546 "\t snapshot() - snapshot the trace buffer\n\n"
5547#endif
5548#ifdef CONFIG_SYNTH_EVENTS
5549 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5550 "\t Write into this file to define/undefine new synthetic events.\n"
5551 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
Tom Zanussia3785b72019-02-13 17:42:46 -06005552#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005553#endif
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005554;
5555
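/*
 * The readme_msg text above is served read-only through the "README"
 * file in tracefs; tracing_readme_read() below simply copies the
 * static string out with simple_read_from_buffer().
 */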
5556static ssize_t
5557tracing_readme_read(struct file *filp, char __user *ubuf,
5558 size_t cnt, loff_t *ppos)
5559{
5560 return simple_read_from_buffer(ubuf, cnt, ppos,
5561 readme_msg, strlen(readme_msg));
5562}
5563
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005564static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02005565 .open = tracing_open_generic,
5566 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005567 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005568};
5569
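/*
 * seq_file iterator for the "saved_tgids" file. tgid_map (allocated
 * when the record-tgid option is first enabled) maps a pid to the
 * tgid recorded for it; each entry with a known tgid is printed as a
 * "<pid> <tgid>" line.
 */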
Michael Sartain99c621d2017-07-05 22:07:15 -06005570static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5571{
5572 int *ptr = v;
5573
5574 if (*pos || m->count)
5575 ptr++;
5576
5577 (*pos)++;
5578
5579 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
5580 if (trace_find_tgid(*ptr))
5581 return ptr;
5582 }
5583
5584 return NULL;
5585}
5586
5587static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5588{
5589 void *v;
5590 loff_t l = 0;
5591
5592 if (!tgid_map)
5593 return NULL;
5594
5595 v = &tgid_map[0];
5596 while (l <= *pos) {
5597 v = saved_tgids_next(m, v, &l);
5598 if (!v)
5599 return NULL;
5600 }
5601
5602 return v;
5603}
5604
5605static void saved_tgids_stop(struct seq_file *m, void *v)
5606{
5607}
5608
5609static int saved_tgids_show(struct seq_file *m, void *v)
5610{
5611 int pid = (int *)v - tgid_map;
5612
5613 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5614 return 0;
5615}
5616
5617static const struct seq_operations tracing_saved_tgids_seq_ops = {
5618 .start = saved_tgids_start,
5619 .stop = saved_tgids_stop,
5620 .next = saved_tgids_next,
5621 .show = saved_tgids_show,
5622};
5623
5624static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5625{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005626 int ret;
5627
5628 ret = tracing_check_open_get_tr(NULL);
5629 if (ret)
5630 return ret;
Michael Sartain99c621d2017-07-05 22:07:15 -06005631
5632 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5633}
5634
5635
5636static const struct file_operations tracing_saved_tgids_fops = {
5637 .open = tracing_saved_tgids_open,
5638 .read = seq_read,
5639 .llseek = seq_lseek,
5640 .release = seq_release,
5641};
5642
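/*
 * seq_file iterator for the "saved_cmdlines" file: walks the
 * pid-to-comm mapping recorded at trace time (under
 * trace_cmdline_lock) and prints one "<pid> <comm>" line per saved
 * entry.
 */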
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005643static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04005644{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005645 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005646
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005647 if (*pos || m->count)
5648 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005649
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005650 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005651
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005652 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5653 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005654 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04005655 continue;
5656
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005657 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005658 }
5659
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005660 return NULL;
5661}
Avadh Patel69abe6a2009-04-10 16:04:48 -04005662
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005663static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5664{
5665 void *v;
5666 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005667
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005668 preempt_disable();
5669 arch_spin_lock(&trace_cmdline_lock);
5670
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005671 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005672 while (l <= *pos) {
5673 v = saved_cmdlines_next(m, v, &l);
5674 if (!v)
5675 return NULL;
5676 }
5677
5678 return v;
5679}
5680
5681static void saved_cmdlines_stop(struct seq_file *m, void *v)
5682{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005683 arch_spin_unlock(&trace_cmdline_lock);
5684 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005685}
5686
5687static int saved_cmdlines_show(struct seq_file *m, void *v)
5688{
5689 char buf[TASK_COMM_LEN];
5690 unsigned int *pid = v;
5691
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005692 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005693 seq_printf(m, "%d %s\n", *pid, buf);
5694 return 0;
5695}
5696
5697static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5698 .start = saved_cmdlines_start,
5699 .next = saved_cmdlines_next,
5700 .stop = saved_cmdlines_stop,
5701 .show = saved_cmdlines_show,
5702};
5703
5704static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5705{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005706 int ret;
5707
5708 ret = tracing_check_open_get_tr(NULL);
5709 if (ret)
5710 return ret;
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005711
5712 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04005713}
5714
5715static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005716 .open = tracing_saved_cmdlines_open,
5717 .read = seq_read,
5718 .llseek = seq_lseek,
5719 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04005720};
5721
5722static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005723tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5724 size_t cnt, loff_t *ppos)
5725{
5726 char buf[64];
5727 int r;
5728
5729 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09005730 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005731 arch_spin_unlock(&trace_cmdline_lock);
5732
5733 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5734}
5735
5736static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5737{
5738 kfree(s->saved_cmdlines);
5739 kfree(s->map_cmdline_to_pid);
5740 kfree(s);
5741}
5742
5743static int tracing_resize_saved_cmdlines(unsigned int val)
5744{
5745 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5746
Namhyung Kima6af8fb2014-06-10 16:11:35 +09005747 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005748 if (!s)
5749 return -ENOMEM;
5750
5751 if (allocate_cmdlines_buffer(val, s) < 0) {
5752 kfree(s);
5753 return -ENOMEM;
5754 }
5755
5756 arch_spin_lock(&trace_cmdline_lock);
5757 savedcmd_temp = savedcmd;
5758 savedcmd = s;
5759 arch_spin_unlock(&trace_cmdline_lock);
5760 free_saved_cmdlines_buffer(savedcmd_temp);
5761
5762 return 0;
5763}
5764
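/*
 * Resize the saved_cmdlines map via the "saved_cmdlines_size" file.
 * For example (illustrative; the default table is much smaller):
 *
 *   # echo 1024 > saved_cmdlines_size
 */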
5765static ssize_t
5766tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5767 size_t cnt, loff_t *ppos)
5768{
5769 unsigned long val;
5770 int ret;
5771
5772 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5773 if (ret)
5774 return ret;
5775
5776	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5777 if (!val || val > PID_MAX_DEFAULT)
5778 return -EINVAL;
5779
5780 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5781 if (ret < 0)
5782 return ret;
5783
5784 *ppos += cnt;
5785
5786 return cnt;
5787}
5788
5789static const struct file_operations tracing_saved_cmdlines_size_fops = {
5790 .open = tracing_open_generic,
5791 .read = tracing_saved_cmdlines_size_read,
5792 .write = tracing_saved_cmdlines_size_write,
5793};
5794
Jeremy Linton681bec02017-05-31 16:56:53 -05005795#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005796static union trace_eval_map_item *
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005797update_eval_map(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005798{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005799 if (!ptr->map.eval_string) {
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005800 if (ptr->tail.next) {
5801 ptr = ptr->tail.next;
5802 /* Set ptr to the next real item (skip head) */
5803 ptr++;
5804 } else
5805 return NULL;
5806 }
5807 return ptr;
5808}
5809
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005810static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005811{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005812 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005813
5814 /*
5815 * Paranoid! If ptr points to end, we don't want to increment past it.
5816 * This really should never happen.
5817 */
Vasily Averin039958a2020-01-24 10:03:01 +03005818 (*pos)++;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005819 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005820 if (WARN_ON_ONCE(!ptr))
5821 return NULL;
5822
5823 ptr++;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005824 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005825
5826 return ptr;
5827}
5828
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005829static void *eval_map_start(struct seq_file *m, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005830{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005831 union trace_eval_map_item *v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005832 loff_t l = 0;
5833
Jeremy Linton1793ed92017-05-31 16:56:46 -05005834 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005835
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005836 v = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005837 if (v)
5838 v++;
5839
5840 while (v && l < *pos) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005841 v = eval_map_next(m, v, &l);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005842 }
5843
5844 return v;
5845}
5846
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005847static void eval_map_stop(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005848{
Jeremy Linton1793ed92017-05-31 16:56:46 -05005849 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005850}
5851
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005852static int eval_map_show(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005853{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005854 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005855
5856 seq_printf(m, "%s %ld (%s)\n",
Jeremy Linton00f4b652017-05-31 16:56:43 -05005857 ptr->map.eval_string, ptr->map.eval_value,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005858 ptr->map.system);
5859
5860 return 0;
5861}
5862
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005863static const struct seq_operations tracing_eval_map_seq_ops = {
5864 .start = eval_map_start,
5865 .next = eval_map_next,
5866 .stop = eval_map_stop,
5867 .show = eval_map_show,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005868};
5869
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005870static int tracing_eval_map_open(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005871{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005872 int ret;
5873
5874 ret = tracing_check_open_get_tr(NULL);
5875 if (ret)
5876 return ret;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005877
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005878 return seq_open(filp, &tracing_eval_map_seq_ops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005879}
5880
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005881static const struct file_operations tracing_eval_map_fops = {
5882 .open = tracing_eval_map_open,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005883 .read = seq_read,
5884 .llseek = seq_lseek,
5885 .release = seq_release,
5886};
5887
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005888static inline union trace_eval_map_item *
Jeremy Linton5f60b352017-05-31 16:56:47 -05005889trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005890{
5891 /* Return tail of array given the head */
5892 return ptr + ptr->head.length + 1;
5893}
5894
5895static void
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005896trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005897 int len)
5898{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005899 struct trace_eval_map **stop;
5900 struct trace_eval_map **map;
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005901 union trace_eval_map_item *map_array;
5902 union trace_eval_map_item *ptr;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005903
5904 stop = start + len;
5905
5906 /*
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005907 * The trace_eval_maps contains the map plus a head and tail item,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005908 * where the head holds the module and length of array, and the
5909 * tail holds a pointer to the next list.
5910 */
Kees Cook6da2ec52018-06-12 13:55:00 -07005911 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005912 if (!map_array) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005913 pr_warn("Unable to allocate trace eval mapping\n");
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005914 return;
5915 }
5916
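	/*
	 * Resulting layout (sketch):
	 *
	 *   map_array[0]          head { mod, length = len }
	 *   map_array[1..len]     one trace_eval_map copied per entry
	 *   map_array[len + 1]    zeroed tail; tail.next chains the next
	 *                         module's array, if any
	 */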
Jeremy Linton1793ed92017-05-31 16:56:46 -05005917 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005918
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005919 if (!trace_eval_maps)
5920 trace_eval_maps = map_array;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005921 else {
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005922 ptr = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005923 for (;;) {
Jeremy Linton5f60b352017-05-31 16:56:47 -05005924 ptr = trace_eval_jmp_to_tail(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005925 if (!ptr->tail.next)
5926 break;
5927 ptr = ptr->tail.next;
5928
5930 ptr->tail.next = map_array;
5931 }
5932 map_array->head.mod = mod;
5933 map_array->head.length = len;
5934 map_array++;
5935
5936 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5937 map_array->map = **map;
5938 map_array++;
5939 }
5940 memset(map_array, 0, sizeof(*map_array));
5941
Jeremy Linton1793ed92017-05-31 16:56:46 -05005942 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005943}
5944
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005945static void trace_create_eval_file(struct dentry *d_tracer)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005946{
Jeremy Linton681bec02017-05-31 16:56:53 -05005947 trace_create_file("eval_map", 0444, d_tracer,
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005948 NULL, &tracing_eval_map_fops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005949}
5950
Jeremy Linton681bec02017-05-31 16:56:53 -05005951#else /* CONFIG_TRACE_EVAL_MAP_FILE */
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005952static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5953static inline void trace_insert_eval_map_file(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05005954 struct trace_eval_map **start, int len) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05005955#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005956
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005957static void trace_insert_eval_map(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05005958 struct trace_eval_map **start, int len)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005959{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005960 struct trace_eval_map **map;
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005961
5962 if (len <= 0)
5963 return;
5964
5965 map = start;
5966
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005967 trace_event_eval_update(map, len);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005968
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005969 trace_insert_eval_map_file(mod, start, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04005970}
5971
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005972static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005973tracing_set_trace_read(struct file *filp, char __user *ubuf,
5974 size_t cnt, loff_t *ppos)
5975{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005976 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08005977 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005978 int r;
5979
5980 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005981 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005982 mutex_unlock(&trace_types_lock);
5983
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005984 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005985}
5986
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005987int tracer_init(struct tracer *t, struct trace_array *tr)
5988{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005989 tracing_reset_online_cpus(&tr->array_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02005990 return t->init(tr);
5991}
5992
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005993static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005994{
5995 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005996
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005997 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005998 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005999}
6000
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006001#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09006002/* resize @trace_buf's per-cpu entries to match those of @size_buf */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006003static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6004 struct array_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09006005{
6006 int cpu, ret = 0;
6007
6008 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6009 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006010 ret = ring_buffer_resize(trace_buf->buffer,
6011 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09006012 if (ret < 0)
6013 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006014 per_cpu_ptr(trace_buf->data, cpu)->entries =
6015 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09006016 }
6017 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006018 ret = ring_buffer_resize(trace_buf->buffer,
6019 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09006020 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006021 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6022 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09006023 }
6024
6025 return ret;
6026}
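
/*
 * Usage sketch (illustrative; this exact call site is hypothetical):
 * the helper keeps two buffers' per-cpu sizes in lock step, which the
 * snapshot logic needs before update_max_tr() is allowed to swap them:
 *
 *	ret = resize_buffer_duplicate_size(&tr->max_buffer,
 *					   &tr->array_buffer,
 *					   RING_BUFFER_ALL_CPUS);
 */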
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006027#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09006028
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006029static int __tracing_resize_ring_buffer(struct trace_array *tr,
6030 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04006031{
6032 int ret;
6033
6034 /*
6035	 * If the kernel or the user changes the size of the ring buffer,
Steven Rostedta123c522009-03-12 11:21:08 -04006036	 * we use the size that was given and can forget about
6037	 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04006038 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006039 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04006040
Steven Rostedtb382ede62012-10-10 21:44:34 -04006041 /* May be called before buffers are initialized */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006042 if (!tr->array_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04006043 return 0;
6044
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006045 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04006046 if (ret < 0)
6047 return ret;
6048
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006049#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006050 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
6051 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09006052 goto out;
6053
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006054 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04006055 if (ret < 0) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006056 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6057 &tr->array_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04006058 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04006059 /*
6060 * AARGH! We are left with different
6061 * size max buffer!!!!
6062 * The max buffer is our "snapshot" buffer.
6063 * When a tracer needs a snapshot (one of the
6064 * latency tracers), it swaps the max buffer
6065	 * with the saved snapshot. We succeeded in
6066	 * updating the size of the main buffer, but failed to
6067 * update the size of the max buffer. But when we tried
6068 * to reset the main buffer to the original size, we
6069 * failed there too. This is very unlikely to
6070 * happen, but if it does, warn and kill all
6071 * tracing.
6072 */
Steven Rostedt73c51622009-03-11 13:42:01 -04006073 WARN_ON(1);
6074 tracing_disabled = 1;
6075 }
6076 return ret;
6077 }
6078
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006079 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006080 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006081 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006082 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006083
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09006084 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006085#endif /* CONFIG_TRACER_MAX_TRACE */
6086
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006087 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006088 set_buffer_entries(&tr->array_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006089 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006090 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04006091
6092 return ret;
6093}
6094
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09006095ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6096 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006097{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07006098 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006099
6100 mutex_lock(&trace_types_lock);
6101
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006102 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6103	 /* make sure this CPU is enabled in the mask */
6104 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6105 ret = -EINVAL;
6106 goto out;
6107 }
6108 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006109
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006110 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006111 if (ret < 0)
6112 ret = -ENOMEM;
6113
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006114out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006115 mutex_unlock(&trace_types_lock);
6116
6117 return ret;
6118}
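
/*
 * Usage sketch (illustrative, not from the original file): this is the
 * path behind the buffer_size_kb control files, e.g.:
 *
 *	# echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *	# echo 1024 > /sys/kernel/tracing/per_cpu/cpu0/buffer_size_kb
 *
 * The first write resizes with cpu_id == RING_BUFFER_ALL_CPUS, the
 * second with the CPU number parsed from the per_cpu path.
 */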
6119
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09006120
Steven Rostedt1852fcc2009-03-11 14:33:00 -04006121/**
6122 * tracing_update_buffers - used by tracing facility to expand ring buffers
6123 *
6124 * To save memory when tracing is never used on a system that has it
6125 * configured in, the ring buffers start out at a minimum size. Once
6126 * a user starts to use the tracing facility, they need to grow to
6127 * their default size.
6128 *
6129 * This function is to be called when a tracer is about to be used.
6130 */
6131int tracing_update_buffers(void)
6132{
6133 int ret = 0;
6134
Steven Rostedt1027fcb2009-03-12 11:33:20 -04006135 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04006136 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006137 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006138 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04006139 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04006140
6141 return ret;
6142}
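
/*
 * Example (a minimal sketch): code that is about to enable tracing
 * calls this first so the boot-time minimal buffers reach their real
 * size:
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *
 * After a successful return the ring buffers are at their default
 * (expanded) size.
 */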
6143
Steven Rostedt577b7852009-02-26 23:43:05 -05006144struct trace_option_dentry;
6145
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006146static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006147create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05006148
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05006149/*
6150 * Used to clear out the tracer before deletion of an instance.
6151 * Must have trace_types_lock held.
6152 */
6153static void tracing_set_nop(struct trace_array *tr)
6154{
6155 if (tr->current_trace == &nop_trace)
6156 return;
6157
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05006158 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05006159
6160 if (tr->current_trace->reset)
6161 tr->current_trace->reset(tr);
6162
6163 tr->current_trace = &nop_trace;
6164}
6165
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04006166static void add_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006167{
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05006168 /* Only enable if the directory has been created already. */
6169 if (!tr->dir)
6170 return;
6171
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006172 create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05006173}
6174
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09006175int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05006176{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006177 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006178#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05006179 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006180#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01006181 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006182
Steven Rostedt1027fcb2009-03-12 11:33:20 -04006183 mutex_lock(&trace_types_lock);
6184
Steven Rostedt73c51622009-03-11 13:42:01 -04006185 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006186 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006187 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04006188 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01006189 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04006190 ret = 0;
6191 }
6192
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006193 for (t = trace_types; t; t = t->next) {
6194 if (strcmp(t->name, buf) == 0)
6195 break;
6196 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02006197 if (!t) {
6198 ret = -EINVAL;
6199 goto out;
6200 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006201 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006202 goto out;
6203
Tom Zanussia35873a2019-02-13 17:42:45 -06006204#ifdef CONFIG_TRACER_SNAPSHOT
6205 if (t->use_max_tr) {
6206 arch_spin_lock(&tr->max_lock);
6207 if (tr->cond_snapshot)
6208 ret = -EBUSY;
6209 arch_spin_unlock(&tr->max_lock);
6210 if (ret)
6211 goto out;
6212 }
6213#endif
Ziqian SUN (Zamir)c7b3ae02017-09-11 14:26:35 +08006214	 /* Some tracers won't work if set from the kernel command line */
6215 if (system_state < SYSTEM_RUNNING && t->noboot) {
6216 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6217 t->name);
6218 goto out;
6219 }
6220
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05006221 /* Some tracers are only allowed for the top level buffer */
6222 if (!trace_ok_for_array(t, tr)) {
6223 ret = -EINVAL;
6224 goto out;
6225 }
6226
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006227 /* If trace pipe files are being read, we can't change the tracer */
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04006228 if (tr->trace_ref) {
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006229 ret = -EBUSY;
6230 goto out;
6231 }
6232
Steven Rostedt9f029e82008-11-12 15:24:24 -05006233 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04006234
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05006235 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04006236
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006237 if (tr->current_trace->reset)
6238 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05006239
Paul E. McKenney74401722018-11-06 18:44:52 -08006240 /* Current trace needs to be nop_trace before synchronize_rcu */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006241 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05006242
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05006243#ifdef CONFIG_TRACER_MAX_TRACE
6244 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05006245
6246 if (had_max_tr && !t->use_max_tr) {
6247 /*
6248 * We need to make sure that the update_max_tr sees that
6249 * current_trace changed to nop_trace to keep it from
6250 * swapping the buffers after we resize it.
6251		 * update_max_tr() is called with interrupts disabled,
6252		 * so a synchronize_rcu() is sufficient.
6253 */
Paul E. McKenney74401722018-11-06 18:44:52 -08006254 synchronize_rcu();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04006255 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09006256 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006257#endif
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006258
6259#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05006260 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04006261 ret = tracing_alloc_snapshot_instance(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09006262 if (ret < 0)
6263 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09006264 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006265#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05006266
Frederic Weisbecker1c800252008-11-16 05:57:26 +01006267 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02006268 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01006269 if (ret)
6270 goto out;
6271 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006272
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006273 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05006274 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05006275 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006276 out:
6277 mutex_unlock(&trace_types_lock);
6278
Peter Zijlstrad9e54072008-11-01 19:57:37 +01006279 return ret;
6280}
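
/*
 * Usage sketch (illustrative): kernel code can switch the current
 * tracer directly, mirroring "echo function > current_tracer" from
 * user space:
 *
 *	ret = tracing_set_tracer(&global_trace, "function");
 *	if (ret < 0)
 *		pr_warn("could not select the function tracer\n");
 */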
6281
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006282static ssize_t
6283tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6284 size_t cnt, loff_t *ppos)
6285{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05006286 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08006287 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006288 int i;
6289 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01006290 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006291
Steven Rostedt60063a62008-10-28 10:44:24 -04006292 ret = cnt;
6293
Li Zefanee6c2c12009-09-18 14:06:47 +08006294 if (cnt > MAX_TRACER_SIZE)
6295 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006296
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08006297 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006298 return -EFAULT;
6299
6300 buf[cnt] = 0;
6301
6302	 /* strip trailing whitespace */
6303 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6304 buf[i] = 0;
6305
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05006306 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01006307 if (err)
6308 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006309
Jiri Olsacf8517c2009-10-23 19:36:16 -04006310 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006311
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02006312 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006313}
6314
6315static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006316tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6317 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006318{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006319 char buf[64];
6320 int r;
6321
Steven Rostedtcffae432008-05-12 21:21:00 +02006322 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006323 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02006324 if (r > sizeof(buf))
6325 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006326 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006327}
6328
6329static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006330tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6331 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006332{
Hannes Eder5e398412009-02-10 19:44:34 +01006333 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006334 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006335
Peter Huewe22fe9b52011-06-07 21:58:27 +02006336 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6337 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006338 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006339
6340 *ptr = val * 1000;
6341
6342 return cnt;
6343}
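
/*
 * Worked example (illustrative): these helpers store nanoseconds but
 * present microseconds. Writing "100" stores 100 * 1000 = 100000 ns;
 * a read converts back with nsecs_to_usecs() and shows "100" again.
 * The sentinel (unsigned long)-1 is displayed as "-1" unscaled.
 */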
6344
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006345static ssize_t
6346tracing_thresh_read(struct file *filp, char __user *ubuf,
6347 size_t cnt, loff_t *ppos)
6348{
6349 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6350}
6351
6352static ssize_t
6353tracing_thresh_write(struct file *filp, const char __user *ubuf,
6354 size_t cnt, loff_t *ppos)
6355{
6356 struct trace_array *tr = filp->private_data;
6357 int ret;
6358
6359 mutex_lock(&trace_types_lock);
6360 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6361 if (ret < 0)
6362 goto out;
6363
6364 if (tr->current_trace->update_thresh) {
6365 ret = tr->current_trace->update_thresh(tr);
6366 if (ret < 0)
6367 goto out;
6368 }
6369
6370 ret = cnt;
6371out:
6372 mutex_unlock(&trace_types_lock);
6373
6374 return ret;
6375}
6376
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04006377#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Chen Gange428abb2015-11-10 05:15:15 +08006378
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006379static ssize_t
6380tracing_max_lat_read(struct file *filp, char __user *ubuf,
6381 size_t cnt, loff_t *ppos)
6382{
6383 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6384}
6385
6386static ssize_t
6387tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6388 size_t cnt, loff_t *ppos)
6389{
6390 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6391}
6392
Chen Gange428abb2015-11-10 05:15:15 +08006393#endif
6394
Steven Rostedtb3806b42008-05-12 21:20:46 +02006395static int tracing_open_pipe(struct inode *inode, struct file *filp)
6396{
Oleg Nesterov15544202013-07-23 17:25:57 +02006397 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006398 struct trace_iterator *iter;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006399 int ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006400
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006401 ret = tracing_check_open_get_tr(tr);
6402 if (ret)
6403 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006404
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006405 mutex_lock(&trace_types_lock);
6406
Steven Rostedtb3806b42008-05-12 21:20:46 +02006407 /* create a buffer to store the information to pass to userspace */
6408 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006409 if (!iter) {
6410 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006411 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006412 goto out;
6413 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02006414
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04006415 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006416 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006417
6418 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6419 ret = -ENOMEM;
6420 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10306421 }
6422
Steven Rostedta3097202008-11-07 22:36:02 -05006423 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10306424 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05006425
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006426 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04006427 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6428
David Sharp8be07092012-11-13 12:18:22 -08006429 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09006430 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08006431 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6432
Oleg Nesterov15544202013-07-23 17:25:57 +02006433 iter->tr = tr;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006434 iter->array_buffer = &tr->array_buffer;
Oleg Nesterov15544202013-07-23 17:25:57 +02006435 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006436 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006437 filp->private_data = iter;
6438
Steven Rostedt107bad82008-05-12 21:21:01 +02006439 if (iter->trace->pipe_open)
6440 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02006441
Arnd Bergmannb4447862010-07-07 23:40:11 +02006442 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006443
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04006444 tr->trace_ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006445out:
6446 mutex_unlock(&trace_types_lock);
6447 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006448
6449fail:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006450 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006451 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006452 mutex_unlock(&trace_types_lock);
6453 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006454}
6455
6456static int tracing_release_pipe(struct inode *inode, struct file *file)
6457{
6458 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02006459 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006460
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006461 mutex_lock(&trace_types_lock);
6462
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04006463 tr->trace_ref--;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006464
Steven Rostedt29bf4a52009-12-09 12:37:43 -05006465 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05006466 iter->trace->pipe_close(iter);
6467
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006468 mutex_unlock(&trace_types_lock);
6469
Rusty Russell44623442009-01-01 10:12:23 +10306470 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006471 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006472 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006473
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006474 trace_array_put(tr);
6475
Steven Rostedtb3806b42008-05-12 21:20:46 +02006476 return 0;
6477}
6478
Al Viro9dd95742017-07-03 00:42:43 -04006479static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006480trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006481{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006482 struct trace_array *tr = iter->tr;
6483
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006484 /* Iterators are static, they should be filled or empty */
6485 if (trace_buffer_iter(iter, iter->cpu_file))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08006486 return EPOLLIN | EPOLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006487
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006488 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006489 /*
6490 * Always select as readable when in blocking mode
6491 */
Linus Torvaldsa9a08842018-02-11 14:34:03 -08006492 return EPOLLIN | EPOLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006493 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006494 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006495 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006496}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006497
Al Viro9dd95742017-07-03 00:42:43 -04006498static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006499tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6500{
6501 struct trace_iterator *iter = filp->private_data;
6502
6503 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006504}
6505
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006506/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006507static int tracing_wait_pipe(struct file *filp)
6508{
6509 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006510 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006511
6512 while (trace_empty(iter)) {
6513
6514 if ((filp->f_flags & O_NONBLOCK)) {
6515 return -EAGAIN;
6516 }
6517
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006518 /*
Liu Bo250bfd32013-01-14 10:54:11 +08006519		 * We only return once we have read something while tracing
6520		 * is disabled. We keep blocking while tracing is disabled if
6521		 * nothing has been read yet. This allows a user to cat this
6522		 * file, and then enable tracing. But after we have read
6523		 * something, we give an EOF when tracing is disabled again.
6524 *
6525 * iter->pos will be 0 if we haven't read anything.
6526 */
Tahsin Erdogan75df6e62017-09-17 03:23:48 -07006527 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006528 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04006529
6530 mutex_unlock(&iter->mutex);
6531
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05006532 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04006533
6534 mutex_lock(&iter->mutex);
6535
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006536 if (ret)
6537 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006538 }
6539
6540 return 1;
6541}
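
/*
 * Behavior sketch (illustrative): the blocking rules above give the
 * usual trace_pipe workflow:
 *
 *	# cat trace_pipe        (blocks while the buffer is empty)
 *	# echo 1 > tracing_on   (from another shell; cat starts streaming)
 *	# echo 0 > tracing_on   (cat drains what is left, then sees EOF)
 */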
6542
Steven Rostedtb3806b42008-05-12 21:20:46 +02006543/*
6544 * Consumer reader.
6545 */
6546static ssize_t
6547tracing_read_pipe(struct file *filp, char __user *ubuf,
6548 size_t cnt, loff_t *ppos)
6549{
6550 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006551 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006552
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006553 /*
6554	 * Avoid more than one consumer on a single file descriptor.
6555	 * This is just a matter of trace coherency; the ring buffer
6556	 * itself is protected.
6557 */
6558 mutex_lock(&iter->mutex);
Steven Rostedt (Red Hat)12458002016-09-23 22:57:13 -04006559
6560 /* return any leftover data */
6561 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6562 if (sret != -EBUSY)
6563 goto out;
6564
6565 trace_seq_init(&iter->seq);
6566
Steven Rostedt107bad82008-05-12 21:21:01 +02006567 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006568 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6569 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02006570 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02006571 }
6572
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006573waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006574 sret = tracing_wait_pipe(filp);
6575 if (sret <= 0)
6576 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006577
6578 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006579 if (trace_empty(iter)) {
6580 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02006581 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006582 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02006583
6584 if (cnt >= PAGE_SIZE)
6585 cnt = PAGE_SIZE - 1;
6586
Steven Rostedt53d0aa72008-05-12 21:21:01 +02006587 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02006588 memset(&iter->seq, 0,
6589 sizeof(struct trace_iterator) -
6590 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04006591 cpumask_clear(iter->started);
Petr Mladekd303de12019-10-11 16:21:34 +02006592 trace_seq_init(&iter->seq);
Steven Rostedt4823ed72008-05-12 21:21:01 +02006593 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006594
Lai Jiangshan4f535962009-05-18 19:35:34 +08006595 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006596 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05006597 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006598 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006599 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006600
Ingo Molnarf9896bf2008-05-12 21:20:47 +02006601 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006602 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02006603 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006604 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006605 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006606 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01006607 if (ret != TRACE_TYPE_NO_CONSUME)
6608 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006609
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006610 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02006611 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01006612
6613 /*
6614 * Setting the full flag means we reached the trace_seq buffer
6615		 * size and should have exited via the partial-output check above;
6616		 * if we get here, one of the trace_seq_* functions was misused.
6617 */
6618 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6619 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006620 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006621 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006622 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02006623
Steven Rostedtb3806b42008-05-12 21:20:46 +02006624 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006625 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006626 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05006627 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006628
6629 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006630	 * If there was nothing to send to the user, despite consuming
6631	 * trace entries, go back and wait for more entries.
6632 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006633 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006634 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006635
Steven Rostedt107bad82008-05-12 21:21:01 +02006636out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006637 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02006638
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006639 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006640}
6641
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006642static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6643 unsigned int idx)
6644{
6645 __free_page(spd->pages[idx]);
6646}
6647
Steven Rostedt34cd4992009-02-09 12:06:29 -05006648static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006649tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006650{
6651 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006652 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006653 int ret;
6654
6655 /* Seq buffer is page-sized, exactly what we need. */
6656 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006657 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006658 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006659
6660 if (trace_seq_has_overflowed(&iter->seq)) {
6661 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006662 break;
6663 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006664
6665 /*
6666		 * This should not be hit: TRACE_TYPE_PARTIAL_LINE should only
6667		 * be returned if iter->seq overflowed, which is checked above.
6668		 * But check it anyway to be safe.
6669 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05006670 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006671 iter->seq.seq.len = save_len;
6672 break;
6673 }
6674
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006675 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006676 if (rem < count) {
6677 rem = 0;
6678 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006679 break;
6680 }
6681
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08006682 if (ret != TRACE_TYPE_NO_CONSUME)
6683 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05006684 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05006685 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05006686 rem = 0;
6687 iter->ent = NULL;
6688 break;
6689 }
6690 }
6691
6692 return rem;
6693}
6694
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006695static ssize_t tracing_splice_read_pipe(struct file *filp,
6696 loff_t *ppos,
6697 struct pipe_inode_info *pipe,
6698 size_t len,
6699 unsigned int flags)
6700{
Jens Axboe35f3d142010-05-20 10:43:18 +02006701 struct page *pages_def[PIPE_DEF_BUFFERS];
6702 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006703 struct trace_iterator *iter = filp->private_data;
6704 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02006705 .pages = pages_def,
6706 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006707 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02006708 .nr_pages_max = PIPE_DEF_BUFFERS,
Christoph Hellwig6797d972020-05-20 17:58:13 +02006709 .ops = &default_pipe_buf_ops,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006710 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006711 };
6712 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006713 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006714 unsigned int i;
6715
Jens Axboe35f3d142010-05-20 10:43:18 +02006716 if (splice_grow_spd(pipe, &spd))
6717 return -ENOMEM;
6718
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006719 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006720
6721 if (iter->trace->splice_read) {
6722 ret = iter->trace->splice_read(iter, filp,
6723 ppos, pipe, len, flags);
6724 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006725 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006726 }
6727
6728 ret = tracing_wait_pipe(filp);
6729 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006730 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006731
Jason Wessel955b61e2010-08-05 09:22:23 -05006732 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006733 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006734 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006735 }
6736
Lai Jiangshan4f535962009-05-18 19:35:34 +08006737 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006738 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006739
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006740 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04006741 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006742 spd.pages[i] = alloc_page(GFP_KERNEL);
6743 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05006744 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006745
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006746 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006747
6748 /* Copy the data into the page, so we can start over. */
6749 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02006750 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006751 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006752 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006753 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006754 break;
6755 }
Jens Axboe35f3d142010-05-20 10:43:18 +02006756 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006757 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006758
Steven Rostedtf9520752009-03-02 14:04:40 -05006759 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006760 }
6761
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006762 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006763 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006764 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006765
6766 spd.nr_pages = i;
6767
Steven Rostedt (Red Hat)a29054d92016-03-18 15:46:48 -04006768 if (i)
6769 ret = splice_to_pipe(pipe, &spd);
6770 else
6771 ret = 0;
Jens Axboe35f3d142010-05-20 10:43:18 +02006772out:
Eric Dumazet047fe362012-06-12 15:24:40 +02006773 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02006774 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006775
Steven Rostedt34cd4992009-02-09 12:06:29 -05006776out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006777 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02006778 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006779}
6780
Steven Rostedta98a3c32008-05-12 21:20:59 +02006781static ssize_t
6782tracing_entries_read(struct file *filp, char __user *ubuf,
6783 size_t cnt, loff_t *ppos)
6784{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006785 struct inode *inode = file_inode(filp);
6786 struct trace_array *tr = inode->i_private;
6787 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006788 char buf[64];
6789 int r = 0;
6790 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006791
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006792 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006793
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006794 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006795 int cpu, buf_size_same;
6796 unsigned long size;
6797
6798 size = 0;
6799 buf_size_same = 1;
6800 /* check if all cpu sizes are same */
6801 for_each_tracing_cpu(cpu) {
6802 /* fill in the size from first enabled cpu */
6803 if (size == 0)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006804 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6805 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006806 buf_size_same = 0;
6807 break;
6808 }
6809 }
6810
6811 if (buf_size_same) {
6812 if (!ring_buffer_expanded)
6813 r = sprintf(buf, "%lu (expanded: %lu)\n",
6814 size >> 10,
6815 trace_buf_size >> 10);
6816 else
6817 r = sprintf(buf, "%lu\n", size >> 10);
6818 } else
6819 r = sprintf(buf, "X\n");
6820 } else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006821 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006822
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006823 mutex_unlock(&trace_types_lock);
6824
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006825 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6826 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006827}
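
/*
 * Reading sketch (illustrative): before the first expansion,
 * "cat buffer_size_kb" shows something like "7 (expanded: 1408)".
 * Once expanded with all CPUs uniform it shows the plain per-cpu
 * size; if individual CPUs were resized differently it prints "X".
 */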
6828
6829static ssize_t
6830tracing_entries_write(struct file *filp, const char __user *ubuf,
6831 size_t cnt, loff_t *ppos)
6832{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006833 struct inode *inode = file_inode(filp);
6834 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006835 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006836 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006837
Peter Huewe22fe9b52011-06-07 21:58:27 +02006838 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6839 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006840 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006841
6842 /* must have at least 1 entry */
6843 if (!val)
6844 return -EINVAL;
6845
Steven Rostedt1696b2b2008-11-13 00:09:35 -05006846 /* value is in KB */
6847 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006848 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006849 if (ret < 0)
6850 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006851
Jiri Olsacf8517c2009-10-23 19:36:16 -04006852 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006853
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006854 return cnt;
6855}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05006856
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006857static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006858tracing_total_entries_read(struct file *filp, char __user *ubuf,
6859 size_t cnt, loff_t *ppos)
6860{
6861 struct trace_array *tr = filp->private_data;
6862 char buf[64];
6863 int r, cpu;
6864 unsigned long size = 0, expanded_size = 0;
6865
6866 mutex_lock(&trace_types_lock);
6867 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006868 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006869 if (!ring_buffer_expanded)
6870 expanded_size += trace_buf_size >> 10;
6871 }
6872 if (ring_buffer_expanded)
6873 r = sprintf(buf, "%lu\n", size);
6874 else
6875 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6876 mutex_unlock(&trace_types_lock);
6877
6878 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6879}
6880
6881static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006882tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6883 size_t cnt, loff_t *ppos)
6884{
6885 /*
6886	 * There is no need to read what the user has written; this function
6887	 * exists just so that "echo" into this file does not return an error.
6888 */
6889
6890 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006891
6892 return cnt;
6893}
6894
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006895static int
6896tracing_free_buffer_release(struct inode *inode, struct file *filp)
6897{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006898 struct trace_array *tr = inode->i_private;
6899
Steven Rostedtcf30cf62011-06-14 22:44:07 -04006900	 /* disable tracing? */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006901 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07006902 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006903 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006904 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006905
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006906 trace_array_put(tr);
6907
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006908 return 0;
6909}
6910
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006911static ssize_t
6912tracing_mark_write(struct file *filp, const char __user *ubuf,
6913 size_t cnt, loff_t *fpos)
6914{
Alexander Z Lam2d716192013-07-01 15:31:24 -07006915 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04006916 struct ring_buffer_event *event;
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006917 enum event_trigger_type tt = ETT_NONE;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05006918 struct trace_buffer *buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04006919 struct print_entry *entry;
Steven Rostedtd696b582011-09-22 11:50:27 -04006920 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006921 int size;
6922 int len;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006923
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006924/* Used in tracing_mark_raw_write() as well */
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006925#define FAULTED_STR "<faulted>"
6926#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006927
Steven Rostedtc76f0692008-11-07 22:36:02 -05006928 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006929 return -EINVAL;
6930
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006931 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07006932 return -EINVAL;
6933
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006934 if (cnt > TRACE_BUF_SIZE)
6935 cnt = TRACE_BUF_SIZE;
6936
Steven Rostedtd696b582011-09-22 11:50:27 -04006937 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006938
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006939 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6940
6941 /* If less than "<faulted>", then make sure we can still add that */
6942 if (cnt < FAULTED_SIZE)
6943 size += FAULTED_SIZE - cnt;
6944
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006945 buffer = tr->array_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006946 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01006947 tracing_gen_ctx());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006948 if (unlikely(!event))
Steven Rostedtd696b582011-09-22 11:50:27 -04006949 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006950 return -EBADF;
Steven Rostedtd696b582011-09-22 11:50:27 -04006951
6952 entry = ring_buffer_event_data(event);
6953 entry->ip = _THIS_IP_;
6954
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006955 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6956 if (len) {
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006957 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006958 cnt = FAULTED_SIZE;
6959 written = -EFAULT;
Steven Rostedtd696b582011-09-22 11:50:27 -04006960 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006961 written = cnt;
Steven Rostedtd696b582011-09-22 11:50:27 -04006962
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006963 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6964 /* do not add \n before testing triggers, but add \0 */
6965 entry->buf[cnt] = '\0';
Steven Rostedt (VMware)b47e3302021-03-16 12:41:03 -04006966 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006967 }
6968
Steven Rostedtd696b582011-09-22 11:50:27 -04006969 if (entry->buf[cnt - 1] != '\n') {
6970 entry->buf[cnt] = '\n';
6971 entry->buf[cnt + 1] = '\0';
6972 } else
6973 entry->buf[cnt] = '\0';
6974
Tingwei Zhang458999c2020-10-05 10:13:15 +03006975 if (static_branch_unlikely(&trace_marker_exports_enabled))
6976 ftrace_exports(event, TRACE_EXPORT_MARKER);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04006977 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04006978
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006979 if (tt)
6980 event_triggers_post_call(tr->trace_marker_file, tt);
6981
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006982 if (written > 0)
6983 *fpos += written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006984
Steven Rostedtfa32e852016-07-06 15:25:08 -04006985 return written;
6986}
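
/*
 * Usage sketch (illustrative): user space annotates a trace by
 * writing plain text into the trace_marker file:
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *
 *	write(fd, "hit checkpoint A", 16);
 *
 * The text appears in the trace as a TRACE_PRINT entry; a trailing
 * newline is added when the writer did not supply one.
 */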
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006987
Steven Rostedtfa32e852016-07-06 15:25:08 -04006988/* Limit it for now to 3K (including tag) */
6989#define RAW_DATA_MAX_SIZE (1024*3)
6990
6991static ssize_t
6992tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6993 size_t cnt, loff_t *fpos)
6994{
6995 struct trace_array *tr = filp->private_data;
6996 struct ring_buffer_event *event;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05006997 struct trace_buffer *buffer;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006998 struct raw_data_entry *entry;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006999 ssize_t written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007000 int size;
7001 int len;
7002
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007003#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7004
Steven Rostedtfa32e852016-07-06 15:25:08 -04007005 if (tracing_disabled)
7006 return -EINVAL;
7007
7008 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7009 return -EINVAL;
7010
7011 /* The marker must at least have a tag id */
7012 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7013 return -EINVAL;
7014
7015 if (cnt > TRACE_BUF_SIZE)
7016 cnt = TRACE_BUF_SIZE;
7017
7018 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7019
Steven Rostedtfa32e852016-07-06 15:25:08 -04007020 size = sizeof(*entry) + cnt;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007021 if (cnt < FAULT_SIZE_ID)
7022 size += FAULT_SIZE_ID - cnt;
7023
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007024 buffer = tr->array_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05007025 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01007026 tracing_gen_ctx());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007027 if (!event)
Steven Rostedtfa32e852016-07-06 15:25:08 -04007028 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007029 return -EBADF;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007030
7031 entry = ring_buffer_event_data(event);
7032
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007033 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7034 if (len) {
7035 entry->id = -1;
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01007036 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007037 written = -EFAULT;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007038 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007039 written = cnt;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007040
7041 __buffer_unlock_commit(buffer, event);
7042
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007043 if (written > 0)
7044 *fpos += written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007045
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02007046 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007047}
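
/*
 * Usage sketch (illustrative; the record layout shown is only an
 * example): trace_marker_raw takes binary records that begin with an
 * integer tag:
 *
 *	struct { unsigned int id; char data[8]; } rec = { 42, "payload" };
 *
 *	write(fd, &rec, sizeof(rec));
 *
 * Writes shorter than sizeof(unsigned int) are rejected with -EINVAL.
 */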
7048
Li Zefan13f16d22009-12-08 11:16:11 +08007049static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08007050{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007051 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08007052 int i;
7053
7054 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08007055 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08007056 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007057 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7058 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08007059 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08007060
Li Zefan13f16d22009-12-08 11:16:11 +08007061 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08007062}
7063
Tom Zanussid71bd342018-01-15 20:52:07 -06007064int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08007065{
Zhaolei5079f322009-08-25 16:12:56 +08007066 int i;
7067
Zhaolei5079f322009-08-25 16:12:56 +08007068 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7069 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7070 break;
7071 }
7072 if (i == ARRAY_SIZE(trace_clocks))
7073 return -EINVAL;
7074
Zhaolei5079f322009-08-25 16:12:56 +08007075 mutex_lock(&trace_types_lock);
7076
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007077 tr->clock_id = i;
7078
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007079 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08007080
David Sharp60303ed2012-10-11 16:27:52 -07007081 /*
7082	 * The new clock may not be consistent with the previous clock;
7083	 * reset the buffer so that it doesn't have incomparable timestamps.
7084 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007085 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007086
7087#ifdef CONFIG_TRACER_MAX_TRACE
Baohong Liu170b3b12017-09-05 16:57:19 -05007088 if (tr->max_buffer.buffer)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007089 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07007090 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007091#endif
David Sharp60303ed2012-10-11 16:27:52 -07007092
Zhaolei5079f322009-08-25 16:12:56 +08007093 mutex_unlock(&trace_types_lock);
7094
Steven Rostedte1e232c2014-02-10 23:38:46 -05007095 return 0;
7096}
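
/*
 * Usage sketch (illustrative): the same switch is reachable from user
 * space through the trace_clock file:
 *
 *	# echo global > /sys/kernel/tracing/trace_clock
 *
 * Note that both the main and (if allocated) max buffers are reset,
 * since timestamps taken from different clocks are not comparable.
 */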
7097
7098static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7099 size_t cnt, loff_t *fpos)
7100{
7101 struct seq_file *m = filp->private_data;
7102 struct trace_array *tr = m->private;
7103 char buf[64];
7104 const char *clockstr;
7105 int ret;
7106
7107 if (cnt >= sizeof(buf))
7108 return -EINVAL;
7109
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08007110 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05007111 return -EFAULT;
7112
7113 buf[cnt] = 0;
7114
7115 clockstr = strstrip(buf);
7116
7117 ret = tracing_set_clock(tr, clockstr);
7118 if (ret)
7119 return ret;
7120
Zhaolei5079f322009-08-25 16:12:56 +08007121 *fpos += cnt;
7122
7123 return cnt;
7124}
7125
Li Zefan13f16d22009-12-08 11:16:11 +08007126static int tracing_clock_open(struct inode *inode, struct file *file)
7127{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007128 struct trace_array *tr = inode->i_private;
7129 int ret;
7130
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007131 ret = tracing_check_open_get_tr(tr);
7132 if (ret)
7133 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007134
7135 ret = single_open(file, tracing_clock_show, inode->i_private);
7136 if (ret < 0)
7137 trace_array_put(tr);
7138
7139 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08007140}
7141
Tom Zanussi2c1ea602018-01-15 20:51:41 -06007142static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7143{
7144 struct trace_array *tr = m->private;
7145
7146 mutex_lock(&trace_types_lock);
7147
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007148 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
Tom Zanussi2c1ea602018-01-15 20:51:41 -06007149 seq_puts(m, "delta [absolute]\n");
7150 else
7151 seq_puts(m, "[delta] absolute\n");
7152
7153 mutex_unlock(&trace_types_lock);
7154
7155 return 0;
7156}
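
/*
 * Reading sketch (illustrative): "cat timestamp_mode" brackets the
 * active mode, e.g. "delta [absolute]" once something (typically a
 * hist trigger) has switched the buffer to absolute timestamps.
 */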
7157
7158static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7159{
7160 struct trace_array *tr = inode->i_private;
7161 int ret;
7162
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007163 ret = tracing_check_open_get_tr(tr);
7164 if (ret)
7165 return ret;
Tom Zanussi2c1ea602018-01-15 20:51:41 -06007166
7167 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7168 if (ret < 0)
7169 trace_array_put(tr);
7170
7171 return ret;
7172}
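
/*
 * Illustrative sketch (hypothetical, never built): the timestamp_mode
 * file rendered by tracing_time_stamp_mode_show() marks the active mode
 * with brackets, e.g. "[delta] absolute".  The path assumes tracefs is
 * mounted at /sys/kernel/tracing; instances expose their own copy under
 * instances/<name>/.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void show_timestamp_mode(void)
{
	char buf[32] = { 0 };
	int fd = open("/sys/kernel/tracing/timestamp_mode", O_RDONLY);

	if (fd < 0)
		return;
	if (read(fd, buf, sizeof(buf) - 1) > 0)
		printf("timestamp mode: %s", buf);
	close(fd);
}
#endif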

u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
{
	if (rbe == this_cpu_read(trace_buffered_event))
		return ring_buffer_time_stamp(buffer);

	return ring_buffer_event_time_stamp(buffer, rbe);
}

/*
 * Set or disable using the per CPU trace_buffered_event when possible.
 */
int tracing_set_filter_buffering(struct trace_array *tr, bool set)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (set && tr->no_filter_buffering_ref++)
		goto out;

	if (!set) {
		if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
			ret = -EINVAL;
			goto out;
		}

		--tr->no_filter_buffering_ref;
	}
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
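
/*
 * Sketch of the expected calling pattern (hypothetical caller, not part
 * of this file): the reference count above means every call with
 * set == true must eventually be paired with one with set == false,
 * and an unbalanced disable trips the WARN_ON_ONCE().
 */
#if 0
static int example_filter_buffering(struct trace_array *tr)
{
	int ret;

	/* first taker actually turns the buffered-event path off */
	ret = tracing_set_filter_buffering(tr, true);
	if (ret)
		return ret;

	/* ... work that needs events committed directly ... */

	/* drop the reference; -EINVAL if it was never taken */
	return tracing_set_filter_buffering(tr, false);
}
#endif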

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		spare_cpu;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->array_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	arch_spin_lock(&tr->max_lock);
	if (tr->cond_snapshot)
		ret = -EBUSY;
	arch_spin_unlock(&tr->max_lock);
	if (ret)
		goto out;

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (tr->allocated_snapshot)
			ret = resize_buffer_duplicate_size(&tr->max_buffer,
					&tr->array_buffer, iter->cpu_file);
		else
			ret = tracing_alloc_snapshot_instance(tr);
		if (ret < 0)
			break;
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id(), NULL);
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
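
/*
 * Illustrative user-space sketch (hypothetical, never built) of the
 * semantics implemented above: writing '0' frees the snapshot buffer,
 * '1' allocates it if needed and swaps it with the live buffer, and any
 * other value clears the snapshot buffer's contents.  The path assumes
 * tracefs is mounted at /sys/kernel/tracing.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int snapshot_now(void)
{
	int fd = open("/sys/kernel/tracing/snapshot", O_WRONLY);
	int ret = 0;

	if (fd < 0)
		return -1;
	/* allocate the max buffer if necessary, then swap */
	if (write(fd, "1", 1) < 0)
		ret = -1;
	close(fd);
	return ret;	/* read the same file to inspect the snapshot */
}
#endif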

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	/* The following checks for tracefs lockdown */
	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.array_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */


static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};
#endif

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_mark_raw_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_raw_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

static const struct file_operations trace_time_stamp_mode_fops = {
	.open		= tracing_time_stamp_mode_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

#define TRACING_LOG_ERRS_MAX	8
#define TRACING_LOG_LOC_MAX	128

#define CMD_PREFIX "  Command: "

struct err_info {
	const char	**errs;	/* ptr to loc-specific array of err strings */
	u8		type;	/* index into errs -> specific err string */
	u8		pos;	/* MAX_FILTER_STR_VAL = 256 */
	u64		ts;
};

struct tracing_log_err {
	struct list_head	list;
	struct err_info		info;
	char			loc[TRACING_LOG_LOC_MAX]; /* err location */
	char			cmd[MAX_FILTER_STR_VAL]; /* what caused err */
};

static DEFINE_MUTEX(tracing_err_log_lock);

static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
{
	struct tracing_log_err *err;

	if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
		err = kzalloc(sizeof(*err), GFP_KERNEL);
		if (!err)
			err = ERR_PTR(-ENOMEM);
		tr->n_err_log_entries++;

		return err;
	}

	err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
	list_del(&err->list);

	return err;
}

/**
 * err_pos - find the position of a string within a command for error careting
 * @cmd: The tracing command that caused the error
 * @str: The string to position the caret at within @cmd
 *
 * Finds the position of the first occurrence of @str within @cmd.  The
 * return value can be passed to tracing_log_err() for caret placement
 * within @cmd.
 *
 * Returns the index within @cmd of the first occurrence of @str or 0
 * if @str was not found.
 */
unsigned int err_pos(char *cmd, const char *str)
{
	char *found;

	if (WARN_ON(!strlen(cmd)))
		return 0;

	found = strstr(cmd, str);
	if (found)
		return found - cmd;

	return 0;
}
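
/*
 * For example (hypothetical command string, never built): "bad" starts
 * at offset 18 of the command below, so the caret printed into the
 * error log lands under that token.
 */
#if 0
static void example_caret(void)
{
	char cmd[] = "function:snapshot:bad";
	unsigned int pos = err_pos(cmd, "bad");	/* returns 18 */

	(void)pos;
}
#endif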

/**
 * tracing_log_err - write an error to the tracing error log
 * @tr: The associated trace array for the error (NULL for top level array)
 * @loc: A string describing where the error occurred
 * @cmd: The tracing command that caused the error
 * @errs: The array of loc-specific static error strings
 * @type: The index into errs[], which produces the specific static err string
 * @pos: The position the caret should be placed in the cmd
 *
 * Writes an error into tracing/error_log of the form:
 *
 * <loc>: error: <text>
 *   Command: <cmd>
 *              ^
 *
 * tracing/error_log is a small log file containing the last
 * TRACING_LOG_ERRS_MAX errors (8).  Memory for errors isn't allocated
 * unless there has been a tracing error, and the error log can be
 * cleared and have its memory freed by writing the empty string in
 * truncation mode to it i.e. echo > tracing/error_log.
 *
 * NOTE: the @errs array along with the @type param are used to
 * produce a static error string - this string is not copied and saved
 * when the error is logged - only a pointer to it is saved.  See
 * existing callers for examples of how static strings are typically
 * defined for use with tracing_log_err().
 */
void tracing_log_err(struct trace_array *tr,
		     const char *loc, const char *cmd,
		     const char **errs, u8 type, u8 pos)
{
	struct tracing_log_err *err;

	if (!tr)
		tr = &global_trace;

	mutex_lock(&tracing_err_log_lock);
	err = get_tracing_log_err(tr);
	if (PTR_ERR(err) == -ENOMEM) {
		mutex_unlock(&tracing_err_log_lock);
		return;
	}

	snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
	snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);

	err->info.errs = errs;
	err->info.type = type;
	err->info.pos = pos;
	err->info.ts = local_clock();

	list_add_tail(&err->list, &tr->err_log);
	mutex_unlock(&tracing_err_log_lock);
}

static void clear_tracing_err_log(struct trace_array *tr)
{
	struct tracing_log_err *err, *next;

	mutex_lock(&tracing_err_log_lock);
	list_for_each_entry_safe(err, next, &tr->err_log, list) {
		list_del(&err->list);
		kfree(err);
	}

	tr->n_err_log_entries = 0;
	mutex_unlock(&tracing_err_log_lock);
}

static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;

	mutex_lock(&tracing_err_log_lock);

	return seq_list_start(&tr->err_log, *pos);
}

static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;

	return seq_list_next(v, &tr->err_log, pos);
}

static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&tracing_err_log_lock);
}

static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
{
	u8 i;

	for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
		seq_putc(m, ' ');
	for (i = 0; i < pos; i++)
		seq_putc(m, ' ');
	seq_puts(m, "^\n");
}

static int tracing_err_log_seq_show(struct seq_file *m, void *v)
{
	struct tracing_log_err *err = v;

	if (err) {
		const char *err_text = err->info.errs[err->info.type];
		u64 sec = err->info.ts;
		u32 nsec;

		nsec = do_div(sec, NSEC_PER_SEC);
		seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
			   err->loc, err_text);
		seq_printf(m, "%s", err->cmd);
		tracing_err_log_show_pos(m, err->info.pos);
	}

	return 0;
}

static const struct seq_operations tracing_err_log_seq_ops = {
	.start	= tracing_err_log_seq_start,
	.next	= tracing_err_log_seq_next,
	.stop	= tracing_err_log_seq_stop,
	.show	= tracing_err_log_seq_show
};

static int tracing_err_log_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret = 0;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	/* If this file was opened for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
		clear_tracing_err_log(tr);

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &tracing_err_log_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = tr;
		} else {
			trace_array_put(tr);
		}
	}
	return ret;
}

static ssize_t tracing_err_log_write(struct file *file,
				     const char __user *buffer,
				     size_t count, loff_t *ppos)
{
	return count;
}

static int tracing_err_log_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	return 0;
}

static const struct file_operations tracing_err_log_fops = {
	.open		= tracing_err_log_open,
	.write		= tracing_err_log_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_err_log_release,
};

static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	info = kvzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.array_buffer = &tr->array_buffer;
	info->spare = NULL;
	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	tr->trace_ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static __poll_t
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret = 0;
	ssize_t size;

	if (!count)
		return 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (!info->spare) {
		info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
							  iter->cpu_file);
		if (IS_ERR(info->spare)) {
			ret = PTR_ERR(info->spare);
			info->spare = NULL;
		} else {
			info->spare_cpu = iter->cpu_file;
		}
	}
	if (!info->spare)
		return ret;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->array_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			ret = wait_on_pipe(iter, 0);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;

	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}
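
/*
 * Illustrative user-space sketch (hypothetical, never built):
 * trace_pipe_raw hands out whole ring-buffer pages, so reads are done a
 * page at a time and the result must be decoded with the ring-buffer
 * page format (tools typically use a library such as libtraceevent).
 * The path is assumed to be per_cpu/cpu<N>/trace_pipe_raw under a
 * tracefs mount, and a 4K PAGE_SIZE is assumed.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int dump_raw_pages(const char *path, int out_fd)
{
	char page[4096];
	int fd = open(path, O_RDONLY);
	ssize_t r;

	if (fd < 0)
		return -1;
	/* read() blocks for data unless O_NONBLOCK was requested */
	while ((r = read(fd, page, sizeof(page))) > 0)
		write(out_fd, page, r);
	close(fd);
	return 0;
}
#endif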

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->trace_ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->array_buffer->buffer,
					   info->spare_cpu, info->spare);
	kvfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

struct buffer_ref {
	struct trace_buffer	*buffer;
	void			*page;
	int			cpu;
	refcount_t		refcount;
};

static void buffer_ref_release(struct buffer_ref *ref)
{
	if (!refcount_dec_and_test(&ref->refcount))
		return;
	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
	kfree(ref);
}

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	buffer_ref_release(ref);
	buf->private = 0;
}

static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (refcount_read(&ref->refcount) > INT_MAX/2)
		return false;

	refcount_inc(&ref->refcount);
	return true;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.release		= buffer_pipe_buf_release,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out while filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	buffer_ref_release(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		refcount_set(&ref->refcount, 1);
		ref->buffer = iter->array_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (IS_ERR(ref->page)) {
			ret = PTR_ERR(ref->page);
			ref->page = NULL;
			kfree(ref);
			break;
		}
		ref->cpu = iter->cpu_file;

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->cpu,
						   ref->page);
			kfree(ref);
			break;
		}

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (ret)
			goto out;

		ret = -EAGAIN;
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			goto out;

		ret = wait_on_pipe(iter, iter->tr->buffer_percent);
		if (ret)
			goto out;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);

	return ret;
}
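
/*
 * Illustrative user-space sketch (hypothetical, never built): the
 * splice path above is what lets tools move ring-buffer pages to a file
 * or socket without copying them through user space.  splice() requires
 * a pipe on one side, hence the intermediate pipe here.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static int splice_one_chunk(int raw_fd, int out_fd)
{
	int pfd[2];
	ssize_t n;

	if (pipe(pfd) < 0)
		return -1;
	/* raw_fd -> pipe: ends up in tracing_buffers_splice_read() */
	n = splice(raw_fd, NULL, pfd[1], NULL, 65536, SPLICE_F_NONBLOCK);
	if (n > 0)
		/* pipe -> destination file or socket */
		n = splice(pfd[0], NULL, out_fd, NULL, n, 0);
	close(pfd[0]);
	close(pfd[1]);
	return n < 0 ? -1 : 0;
}
#endif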

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct array_buffer *trace_buf = &tr->array_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}
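
/*
 * The per-cpu "stats" file produced above is plain text; with a
 * nanosecond clock it looks roughly like this (values are illustrative
 * only, not taken from a real trace):
 *
 *	entries: 1024
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 53280
 *	oldest event ts: 5213.183206
 *	now ts: 5240.918913
 *	dropped events: 0
 *	read events: 512
 */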

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	ssize_t ret;
	char *buf;
	int r;

	/* 256 should be plenty to hold the amount needed */
	buf = kmalloc(256, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
		      ftrace_update_tot_cnt,
		      ftrace_number_of_pages,
		      ftrace_number_of_groups);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	kfree(buf);
	return ret;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	tracing_snapshot_instance(tr);
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {

		if (*count <= 0)
			return;

		(*count)--;
	}

	tracing_snapshot_instance(tr);
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		     unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		if (!mapper)
			return;
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
	.init			= ftrace_snapshot_init,
	.free			= ftrace_snapshot_free,
};

static int
ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	if (!tr)
		return -ENODEV;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = tracing_alloc_snapshot_instance(tr);
	if (ret < 0)
		goto out;

	ret = register_ftrace_function_probe(glob, tr, ops, count);

 out:
	return ret < 0 ? ret : 0;
}
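
/*
 * Illustrative user-space sketch (hypothetical, never built): the
 * callback above implements the "snapshot" command of
 * set_ftrace_filter.  Writing "<func>:snapshot" snapshots on every call
 * of <func>, an optional ":<count>" limits how many times it fires, and
 * a leading '!' removes the probe.  The path assumes tracefs is mounted
 * at /sys/kernel/tracing.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int arm_snapshot_probe(void)
{
	const char *cmd = "kfree:snapshot:1";	/* snapshot on first kfree() */
	int fd = open("/sys/kernel/tracing/set_ftrace_filter", O_WRONLY);
	int ret = 0;

	if (fd < 0)
		return -1;
	if (write(fd, cmd, strlen(cmd)) < 0)
		ret = -1;
	close(fd);
	return ret;
}
#endif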
8300
8301static struct ftrace_func_command ftrace_snapshot_cmd = {
8302 .name = "snapshot",
8303 .func = ftrace_trace_snapshot_callback,
8304};
8305
Tom Zanussi38de93a2013-10-24 08:34:18 -05008306static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008307{
8308 return register_ftrace_command(&ftrace_snapshot_cmd);
8309}
8310#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05008311static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008312#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008313
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008314static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008315{
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008316 if (WARN_ON(!tr->dir))
8317 return ERR_PTR(-ENODEV);
8318
8319 /* Top directory uses NULL as the parent */
8320 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8321 return NULL;
8322
8323 /* All sub buffers have a descriptor */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008324 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008325}
8326
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008327static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8328{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008329 struct dentry *d_tracer;
8330
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008331 if (tr->percpu_dir)
8332 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008333
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008334 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05008335 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008336 return NULL;
8337
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008338 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008339
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008340 MEM_FAIL(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008341 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008342
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008343 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008344}
8345
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008346static struct dentry *
8347trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8348 void *data, long cpu, const struct file_operations *fops)
8349{
8350 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8351
8352 if (ret) /* See tracing_get_cpu() */
David Howells7682c912015-03-17 22:26:16 +00008353 d_inode(ret)->i_cdev = (void *)(cpu + 1);
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008354 return ret;
8355}
8356
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008357static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008358tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008359{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008360 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008361 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04008362 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008363
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09008364 if (!d_percpu)
8365 return;
8366
Steven Rostedtdd49a382010-10-20 21:51:26 -04008367 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008368 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008369 if (!d_cpu) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07008370 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008371 return;
8372 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008373
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008374 /* per cpu trace_pipe */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008375 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02008376 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008377
8378 /* per cpu trace */
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008379 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008380 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04008381
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008382 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02008383 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008384
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008385 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02008386 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08008387
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008388 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02008389 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05008390
8391#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008392 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008393 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008394
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008395 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02008396 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05008397#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008398}
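/*
 * The resulting per-CPU layout, for example on CPU 0 (tracefs mount
 * point assumed):
 *
 *	/sys/kernel/tracing/per_cpu/cpu0/trace_pipe
 *	/sys/kernel/tracing/per_cpu/cpu0/trace
 *	/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw
 *	/sys/kernel/tracing/per_cpu/cpu0/stats
 *	/sys/kernel/tracing/per_cpu/cpu0/buffer_size_kb
 *	/sys/kernel/tracing/per_cpu/cpu0/snapshot	(CONFIG_TRACER_SNAPSHOT)
 *	/sys/kernel/tracing/per_cpu/cpu0/snapshot_raw	(CONFIG_TRACER_SNAPSHOT)
 */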
8399
Steven Rostedt60a11772008-05-12 21:20:44 +02008400#ifdef CONFIG_FTRACE_SELFTEST
8401/* Let selftest have access to static functions in this file */
8402#include "trace_selftest.c"
8403#endif
8404
Steven Rostedt577b7852009-02-26 23:43:05 -05008405static ssize_t
8406trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8407 loff_t *ppos)
8408{
8409 struct trace_option_dentry *topt = filp->private_data;
8410 char *buf;
8411
8412 if (topt->flags->val & topt->opt->bit)
8413 buf = "1\n";
8414 else
8415 buf = "0\n";
8416
8417 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8418}
8419
8420static ssize_t
8421trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8422 loff_t *ppos)
8423{
8424 struct trace_option_dentry *topt = filp->private_data;
8425 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05008426 int ret;
8427
Peter Huewe22fe9b52011-06-07 21:58:27 +02008428 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8429 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05008430 return ret;
8431
Li Zefan8d18eaa2009-12-08 11:17:06 +08008432 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05008433 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08008434
8435 if (!!(topt->flags->val & topt->opt->bit) != val) {
8436 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05008437 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05008438 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08008439 mutex_unlock(&trace_types_lock);
8440 if (ret)
8441 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05008442 }
8443
8444 *ppos += cnt;
8445
8446 return cnt;
8447}
8448
8449
8450static const struct file_operations trace_options_fops = {
8451 .open = tracing_open_generic,
8452 .read = trace_options_read,
8453 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008454 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05008455};
8456
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008457/*
8458 * In order to pass in both the trace_array descriptor as well as the index
8459 * to the flag that the trace option file represents, the trace_array
8460 * has a character array of trace_flags_index[], which holds the index
8461 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8462 * The address of this character array is passed to the flag option file
8463 * read/write callbacks.
8464 *
8465 * In order to extract both the index and the trace_array descriptor,
8466 * get_tr_index() uses the following algorithm.
8467 *
8468 * idx = *ptr;
8469 *
8470 * As the pointer itself contains the address of the index (remember
8471 * index[1] == 1).
8472 *
8473 * Then to get the trace_array descriptor, by subtracting that index
8474 * from the ptr, we get to the start of the index itself.
8475 *
8476 * ptr - idx == &index[0]
8477 *
8478 * Then a simple container_of() from that pointer gets us to the
8479 * trace_array descriptor.
8480 */
8481static void get_tr_index(void *data, struct trace_array **ptr,
8482 unsigned int *pindex)
8483{
8484 *pindex = *(unsigned char *)data;
8485
8486 *ptr = container_of(data - *pindex, struct trace_array,
8487 trace_flags_index);
8488}
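/*
 * A worked example of the decoding above (values invented for
 * illustration): suppose data points at tr->trace_flags_index[3].
 *
 *	idx = *(unsigned char *)data;	idx == 3, since index[3] == 3
 *	base = data - idx;		base == &tr->trace_flags_index[0]
 *	container_of(base, struct trace_array, trace_flags_index) == tr
 */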
8489
Steven Rostedta8259072009-02-26 22:19:12 -05008490static ssize_t
8491trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8492 loff_t *ppos)
8493{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008494 void *tr_index = filp->private_data;
8495 struct trace_array *tr;
8496 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05008497 char *buf;
8498
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008499 get_tr_index(tr_index, &tr, &index);
8500
8501 if (tr->trace_flags & (1 << index))
Steven Rostedta8259072009-02-26 22:19:12 -05008502 buf = "1\n";
8503 else
8504 buf = "0\n";
8505
8506 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8507}
8508
8509static ssize_t
8510trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8511 loff_t *ppos)
8512{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008513 void *tr_index = filp->private_data;
8514 struct trace_array *tr;
8515 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05008516 unsigned long val;
8517 int ret;
8518
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008519 get_tr_index(tr_index, &tr, &index);
8520
Peter Huewe22fe9b52011-06-07 21:58:27 +02008521 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8522 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05008523 return ret;
8524
Zhaoleif2d84b62009-08-07 18:55:48 +08008525 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05008526 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008527
Prateek Sood3a53acf2019-12-10 09:15:16 +00008528 mutex_lock(&event_mutex);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008529 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008530 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008531 mutex_unlock(&trace_types_lock);
Prateek Sood3a53acf2019-12-10 09:15:16 +00008532 mutex_unlock(&event_mutex);
Steven Rostedta8259072009-02-26 22:19:12 -05008533
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04008534 if (ret < 0)
8535 return ret;
8536
Steven Rostedta8259072009-02-26 22:19:12 -05008537 *ppos += cnt;
8538
8539 return cnt;
8540}
8541
Steven Rostedta8259072009-02-26 22:19:12 -05008542static const struct file_operations trace_options_core_fops = {
8543 .open = tracing_open_generic,
8544 .read = trace_options_core_read,
8545 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008546 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05008547};
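/*
 * From user space each core flag shows up as a boolean file under
 * options/, e.g. (tracefs mount point assumed):
 *
 *	echo 1 > /sys/kernel/tracing/options/sym-offset
 *	cat /sys/kernel/tracing/options/sym-offset
 *
 * Writes other than 0 or 1 are rejected with -EINVAL by the handler
 * above.
 */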
8548
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008549struct dentry *trace_create_file(const char *name,
Al Virof4ae40a62011-07-24 04:33:43 -04008550 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008551 struct dentry *parent,
8552 void *data,
8553 const struct file_operations *fops)
8554{
8555 struct dentry *ret;
8556
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008557 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008558 if (!ret)
Joe Perchesa395d6a2016-03-22 14:28:09 -07008559 pr_warn("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008560
8561 return ret;
8562}
8563
8564
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008565static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05008566{
8567 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05008568
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008569 if (tr->options)
8570 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05008571
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008572 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05008573 if (IS_ERR(d_tracer))
Steven Rostedta8259072009-02-26 22:19:12 -05008574 return NULL;
8575
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008576 tr->options = tracefs_create_dir("options", d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008577 if (!tr->options) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07008578 pr_warn("Could not create tracefs directory 'options'\n");
Steven Rostedta8259072009-02-26 22:19:12 -05008579 return NULL;
8580 }
8581
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008582 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05008583}
8584
Steven Rostedt577b7852009-02-26 23:43:05 -05008585static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008586create_trace_option_file(struct trace_array *tr,
8587 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05008588 struct tracer_flags *flags,
8589 struct tracer_opt *opt)
8590{
8591 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05008592
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008593 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05008594 if (!t_options)
8595 return;
8596
8597 topt->flags = flags;
8598 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008599 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05008600
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008601 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05008602 &trace_options_fops);
8603
Steven Rostedt577b7852009-02-26 23:43:05 -05008604}
8605
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008606static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008607create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05008608{
8609 struct trace_option_dentry *topts;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008610 struct trace_options *tr_topts;
Steven Rostedt577b7852009-02-26 23:43:05 -05008611 struct tracer_flags *flags;
8612 struct tracer_opt *opts;
8613 int cnt;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008614 int i;
Steven Rostedt577b7852009-02-26 23:43:05 -05008615
8616 if (!tracer)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008617 return;
Steven Rostedt577b7852009-02-26 23:43:05 -05008618
8619 flags = tracer->flags;
8620
8621 if (!flags || !flags->opts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008622 return;
8623
8624 /*
8625 * If this is an instance, only create flags for tracers
8626 * the instance may have.
8627 */
8628 if (!trace_ok_for_array(tracer, tr))
8629 return;
8630
8631 for (i = 0; i < tr->nr_topts; i++) {
Chunyu Hud39cdd22016-03-08 21:37:01 +08008632 /* Make sure there are no duplicate flags. */
8633 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008634 return;
8635 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008636
8637 opts = flags->opts;
8638
8639 for (cnt = 0; opts[cnt].name; cnt++)
8640 ;
8641
Steven Rostedt0cfe8242009-02-27 10:51:10 -05008642 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05008643 if (!topts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008644 return;
8645
8646 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8647 GFP_KERNEL);
8648 if (!tr_topts) {
8649 kfree(topts);
8650 return;
8651 }
8652
8653 tr->topts = tr_topts;
8654 tr->topts[tr->nr_topts].tracer = tracer;
8655 tr->topts[tr->nr_topts].topts = topts;
8656 tr->nr_topts++;
Steven Rostedt577b7852009-02-26 23:43:05 -05008657
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008658 for (cnt = 0; opts[cnt].name; cnt++) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008659 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05008660 &opts[cnt]);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008661 MEM_FAIL(topts[cnt].entry == NULL,
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008662 "Failed to create trace option: %s",
8663 opts[cnt].name);
8664 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008665}
8666
Steven Rostedta8259072009-02-26 22:19:12 -05008667static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008668create_trace_option_core_file(struct trace_array *tr,
8669 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05008670{
8671 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05008672
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008673 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008674 if (!t_options)
8675 return NULL;
8676
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008677 return trace_create_file(option, 0644, t_options,
8678 (void *)&tr->trace_flags_index[index],
8679 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05008680}
8681
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008682static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05008683{
8684 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008685 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05008686 int i;
8687
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008688 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008689 if (!t_options)
8690 return;
8691
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008692 for (i = 0; trace_options[i]; i++) {
8693 if (top_level ||
8694 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8695 create_trace_option_core_file(tr, trace_options[i], i);
8696 }
Steven Rostedta8259072009-02-26 22:19:12 -05008697}
8698
Steven Rostedt499e5472012-02-22 15:50:28 -05008699static ssize_t
8700rb_simple_read(struct file *filp, char __user *ubuf,
8701 size_t cnt, loff_t *ppos)
8702{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04008703 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05008704 char buf[64];
8705 int r;
8706
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008707 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05008708 r = sprintf(buf, "%d\n", r);
8709
8710 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8711}
8712
8713static ssize_t
8714rb_simple_write(struct file *filp, const char __user *ubuf,
8715 size_t cnt, loff_t *ppos)
8716{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04008717 struct trace_array *tr = filp->private_data;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05008718 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05008719 unsigned long val;
8720 int ret;
8721
8722 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8723 if (ret)
8724 return ret;
8725
8726 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008727 mutex_lock(&trace_types_lock);
Steven Rostedt (VMware)f1436412018-08-01 15:40:57 -04008728 if (!!val == tracer_tracing_is_on(tr)) {
8729 val = 0; /* do nothing */
8730 } else if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008731 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008732 if (tr->current_trace->start)
8733 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008734 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008735 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008736 if (tr->current_trace->stop)
8737 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008738 }
8739 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05008740 }
8741
8742 (*ppos)++;
8743
8744 return cnt;
8745}
8746
8747static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04008748 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05008749 .read = rb_simple_read,
8750 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04008751 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05008752 .llseek = default_llseek,
8753};
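/*
 * This backs the per-instance "tracing_on" file. Expected behavior,
 * per the write handler above:
 *
 *	echo 0 > tracing_on	# tracer_tracing_off() + tracer->stop()
 *	echo 1 > tracing_on	# tracer_tracing_on() + tracer->start()
 *
 * Writing the value already in effect is a no-op.
 */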
8754
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05008755static ssize_t
8756buffer_percent_read(struct file *filp, char __user *ubuf,
8757 size_t cnt, loff_t *ppos)
8758{
8759 struct trace_array *tr = filp->private_data;
8760 char buf[64];
8761 int r;
8762
8763 r = tr->buffer_percent;
8764 r = sprintf(buf, "%d\n", r);
8765
8766 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8767}
8768
8769static ssize_t
8770buffer_percent_write(struct file *filp, const char __user *ubuf,
8771 size_t cnt, loff_t *ppos)
8772{
8773 struct trace_array *tr = filp->private_data;
8774 unsigned long val;
8775 int ret;
8776
8777 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8778 if (ret)
8779 return ret;
8780
8781 if (val > 100)
8782 return -EINVAL;
8783
8784 if (!val)
8785 val = 1;
8786
8787 tr->buffer_percent = val;
8788
8789 (*ppos)++;
8790
8791 return cnt;
8792}
8793
8794static const struct file_operations buffer_percent_fops = {
8795 .open = tracing_open_generic_tr,
8796 .read = buffer_percent_read,
8797 .write = buffer_percent_write,
8798 .release = tracing_release_generic_tr,
8799 .llseek = default_llseek,
8800};
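/*
 * "buffer_percent" tunes how full the ring buffer must be before a
 * blocked reader is woken. Per the write handler above, values above
 * 100 are rejected and a write of 0 is stored as 1. For example:
 *
 *	echo 50 > buffer_percent	# wake readers at half full
 */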
8801
YueHaibingff585c52019-06-14 23:32:10 +08008802static struct dentry *trace_instance_dir;
Steven Rostedt277ba042012-08-03 16:10:49 -04008803
8804static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008805init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
Steven Rostedt277ba042012-08-03 16:10:49 -04008806
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008807static int
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008808allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04008809{
8810 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008811
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008812 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008813
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05008814 buf->tr = tr;
8815
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008816 buf->buffer = ring_buffer_alloc(size, rb_flags);
8817 if (!buf->buffer)
8818 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008819
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008820 buf->data = alloc_percpu(struct trace_array_cpu);
8821 if (!buf->data) {
8822 ring_buffer_free(buf->buffer);
Steven Rostedt (VMware)4397f042017-12-26 20:07:34 -05008823 buf->buffer = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008824 return -ENOMEM;
8825 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008826
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008827 /* Allocate the first page for all buffers */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008828 set_buffer_entries(&tr->array_buffer,
8829 ring_buffer_size(tr->array_buffer.buffer, 0));
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008830
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008831 return 0;
8832}
8833
8834static int allocate_trace_buffers(struct trace_array *tr, int size)
8835{
8836 int ret;
8837
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008838 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008839 if (ret)
8840 return ret;
8841
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008842#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008843 ret = allocate_trace_buffer(tr, &tr->max_buffer,
8844 allocate_snapshot ? size : 1);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008845 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008846 ring_buffer_free(tr->array_buffer.buffer);
8847 tr->array_buffer.buffer = NULL;
8848 free_percpu(tr->array_buffer.data);
8849 tr->array_buffer.data = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008850 return -ENOMEM;
8851 }
8852 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008853
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05008854 /*
8855 * Only the top level trace array gets its snapshot allocated
8856 * from the kernel command line.
8857 */
8858 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008859#endif
Steven Rostedt (VMware)11f5efc2020-05-06 10:36:18 -04008860
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008861 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008862}
8863
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008864static void free_trace_buffer(struct array_buffer *buf)
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04008865{
8866 if (buf->buffer) {
8867 ring_buffer_free(buf->buffer);
8868 buf->buffer = NULL;
8869 free_percpu(buf->data);
8870 buf->data = NULL;
8871 }
8872}
8873
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008874static void free_trace_buffers(struct trace_array *tr)
8875{
8876 if (!tr)
8877 return;
8878
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008879 free_trace_buffer(&tr->array_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008880
8881#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04008882 free_trace_buffer(&tr->max_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04008883#endif
8884}
8885
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008886static void init_trace_flags_index(struct trace_array *tr)
8887{
8888 int i;
8889
8890 /* Used by the trace options files */
8891 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8892 tr->trace_flags_index[i] = i;
8893}
8894
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008895static void __update_tracer_options(struct trace_array *tr)
8896{
8897 struct tracer *t;
8898
8899 for (t = trace_types; t; t = t->next)
8900 add_tracer_options(tr, t);
8901}
8902
8903static void update_tracer_options(struct trace_array *tr)
8904{
8905 mutex_lock(&trace_types_lock);
8906 __update_tracer_options(tr);
8907 mutex_unlock(&trace_types_lock);
8908}
8909
Tom Zanussi89c95fc2020-01-29 12:59:21 -06008910/* Must have trace_types_lock held */
8911struct trace_array *trace_array_find(const char *instance)
8912{
8913 struct trace_array *tr, *found = NULL;
8914
8915 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8916 if (tr->name && strcmp(tr->name, instance) == 0) {
8917 found = tr;
8918 break;
8919 }
8920 }
8921
8922 return found;
8923}
8924
8925struct trace_array *trace_array_find_get(const char *instance)
8926{
8927 struct trace_array *tr;
8928
8929 mutex_lock(&trace_types_lock);
8930 tr = trace_array_find(instance);
8931 if (tr)
8932 tr->ref++;
8933 mutex_unlock(&trace_types_lock);
8934
8935 return tr;
8936}
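/*
 * Illustrative caller pattern for the two lookup helpers above:
 *
 *	tr = trace_array_find_get("foo");
 *	if (tr) {
 *		... use tr; the reference taken above keeps it alive ...
 *		trace_array_put(tr);
 *	}
 *
 * Plain trace_array_find() takes no reference, so its callers must
 * already hold trace_types_lock.
 */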
8937
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09008938static int trace_array_create_dir(struct trace_array *tr)
8939{
8940 int ret;
8941
8942 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
8943 if (!tr->dir)
8944 return -EINVAL;
8945
8946 ret = event_trace_add_tracer(tr->dir, tr);
8947 if (ret)
8948 tracefs_remove(tr->dir);
8949
8950 init_tracer_tracefs(tr, tr->dir);
8951 __update_tracer_options(tr);
8952
8953 return ret;
8954}
8955
Divya Indi28879782019-11-20 11:08:38 -08008956static struct trace_array *trace_array_create(const char *name)
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008957{
Steven Rostedt277ba042012-08-03 16:10:49 -04008958 struct trace_array *tr;
8959 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04008960
Steven Rostedt277ba042012-08-03 16:10:49 -04008961 ret = -ENOMEM;
8962 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8963 if (!tr)
Divya Indi28879782019-11-20 11:08:38 -08008964 return ERR_PTR(ret);
Steven Rostedt277ba042012-08-03 16:10:49 -04008965
8966 tr->name = kstrdup(name, GFP_KERNEL);
8967 if (!tr->name)
8968 goto out_free_tr;
8969
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008970 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8971 goto out_free_tr;
8972
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04008973 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04008974
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07008975 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8976
Steven Rostedt277ba042012-08-03 16:10:49 -04008977 raw_spin_lock_init(&tr->start_lock);
8978
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05008979 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8980
Steven Rostedt277ba042012-08-03 16:10:49 -04008981 tr->current_trace = &nop_trace;
8982
8983 INIT_LIST_HEAD(&tr->systems);
8984 INIT_LIST_HEAD(&tr->events);
Tom Zanussi067fe032018-01-15 20:51:56 -06008985 INIT_LIST_HEAD(&tr->hist_vars);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04008986 INIT_LIST_HEAD(&tr->err_log);
Steven Rostedt277ba042012-08-03 16:10:49 -04008987
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05008988 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04008989 goto out_free_tr;
8990
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09008991 if (ftrace_allocate_ftrace_ops(tr) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04008992 goto out_free_tr;
8993
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04008994 ftrace_init_trace_array(tr);
8995
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008996 init_trace_flags_index(tr);
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09008997
8998 if (trace_instance_dir) {
8999 ret = trace_array_create_dir(tr);
9000 if (ret)
9001 goto out_free_tr;
Masami Hiramatsu720dee52020-09-25 01:40:08 +09009002 } else
9003 __trace_early_add_events(tr);
Steven Rostedt277ba042012-08-03 16:10:49 -04009004
9005 list_add(&tr->list, &ftrace_trace_arrays);
9006
Divya Indi28879782019-11-20 11:08:38 -08009007 tr->ref++;
9008
Divya Indif45d1222019-03-20 11:28:51 -07009009 return tr;
Steven Rostedt277ba042012-08-03 16:10:49 -04009010
9011 out_free_tr:
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09009012 ftrace_free_ftrace_ops(tr);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04009013 free_trace_buffers(tr);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009014 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04009015 kfree(tr->name);
9016 kfree(tr);
9017
Divya Indif45d1222019-03-20 11:28:51 -07009018 return ERR_PTR(ret);
9019}
Steven Rostedt277ba042012-08-03 16:10:49 -04009020
Divya Indif45d1222019-03-20 11:28:51 -07009021static int instance_mkdir(const char *name)
9022{
Divya Indi28879782019-11-20 11:08:38 -08009023 struct trace_array *tr;
9024 int ret;
9025
9026 mutex_lock(&event_mutex);
9027 mutex_lock(&trace_types_lock);
9028
9029 ret = -EEXIST;
Tom Zanussi89c95fc2020-01-29 12:59:21 -06009030 if (trace_array_find(name))
9031 goto out_unlock;
Divya Indi28879782019-11-20 11:08:38 -08009032
9033 tr = trace_array_create(name);
9034
9035 ret = PTR_ERR_OR_ZERO(tr);
9036
9037out_unlock:
9038 mutex_unlock(&trace_types_lock);
9039 mutex_unlock(&event_mutex);
9040 return ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04009041}
9042
Divya Indi28879782019-11-20 11:08:38 -08009043/**
9044 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9045 * @name: The name of the trace array to be looked up/created.
9046 *
 9047 * Returns a pointer to the trace array with the given name, or
 9048 * NULL if it cannot be created.
9049 *
9050 * NOTE: This function increments the reference counter associated with the
9051 * trace array returned. This makes sure it cannot be freed while in use.
9052 * Use trace_array_put() once the trace array is no longer needed.
Steven Rostedt (VMware)28394da2020-01-24 20:47:46 -05009053 * If the trace_array is to be freed, trace_array_destroy() needs to
9054 * be called after the trace_array_put(), or simply let user space delete
 9055 * it from the tracefs instances directory. But until
 9056 * trace_array_put() is called, user space cannot delete it.
Divya Indi28879782019-11-20 11:08:38 -08009057 *
9058 */
9059struct trace_array *trace_array_get_by_name(const char *name)
9060{
9061 struct trace_array *tr;
9062
9063 mutex_lock(&event_mutex);
9064 mutex_lock(&trace_types_lock);
9065
9066 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9067 if (tr->name && strcmp(tr->name, name) == 0)
9068 goto out_unlock;
9069 }
9070
9071 tr = trace_array_create(name);
9072
9073 if (IS_ERR(tr))
9074 tr = NULL;
9075out_unlock:
9076 if (tr)
9077 tr->ref++;
9078
9079 mutex_unlock(&trace_types_lock);
9080 mutex_unlock(&event_mutex);
9081 return tr;
9082}
9083EXPORT_SYMBOL_GPL(trace_array_get_by_name);
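/*
 * An illustrative sketch of the intended use from a module ("my_inst"
 * is a made-up name; error handling trimmed). Depending on kernel
 * version, trace_array_init_printk() may be required before the first
 * trace_array_printk() call:
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_inst");
 *	if (!tr)
 *		return -ENODEV;
 *	trace_array_printk(tr, _THIS_IP_, "hello\n");
 *	trace_array_put(tr);
 *	trace_array_destroy(tr);	(only to remove the instance)
 */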
9084
Divya Indif45d1222019-03-20 11:28:51 -07009085static int __remove_instance(struct trace_array *tr)
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009086{
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04009087 int i;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009088
Divya Indi28879782019-11-20 11:08:38 -08009089 /* Reference counter for a newly created trace array = 1. */
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04009090 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
Divya Indif45d1222019-03-20 11:28:51 -07009091 return -EBUSY;
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05009092
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009093 list_del(&tr->list);
9094
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04009095 /* Disable all the flags that were enabled coming in */
9096 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9097 if ((1 << i) & ZEROED_TRACE_FLAGS)
9098 set_tracer_flag(tr, 1 << i, 0);
9099 }
9100
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05009101 tracing_set_nop(tr);
Naveen N. Raoa0e63692017-05-16 23:21:26 +05309102 clear_ftrace_function_probes(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009103 event_trace_del_tracer(tr);
Namhyung Kimd879d0b2017-04-17 11:44:27 +09009104 ftrace_clear_pids(tr);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05009105 ftrace_destroy_function_files(tr);
Al Viroa3d1e7e2019-11-18 09:43:10 -05009106 tracefs_remove(tr->dir);
Yordan Karadzhov (VMware)20344c52021-04-15 21:18:51 +03009107 free_percpu(tr->last_func_repeats);
Steven Rostedt (Red Hat)a9fcaaa2014-06-06 23:17:28 -04009108 free_trace_buffers(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009109
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04009110 for (i = 0; i < tr->nr_topts; i++) {
9111 kfree(tr->topts[i].topts);
9112 }
9113 kfree(tr->topts);
9114
Chunyu Hudb9108e02017-07-20 18:36:09 +08009115 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009116 kfree(tr->name);
9117 kfree(tr);
9118
Divya Indif45d1222019-03-20 11:28:51 -07009119 return 0;
9120}
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009121
Divya Indie585e642019-08-14 10:55:24 -07009122int trace_array_destroy(struct trace_array *this_tr)
Divya Indif45d1222019-03-20 11:28:51 -07009123{
Divya Indie585e642019-08-14 10:55:24 -07009124 struct trace_array *tr;
Divya Indif45d1222019-03-20 11:28:51 -07009125 int ret;
9126
Divya Indie585e642019-08-14 10:55:24 -07009127 if (!this_tr)
Divya Indif45d1222019-03-20 11:28:51 -07009128 return -EINVAL;
9129
9130 mutex_lock(&event_mutex);
9131 mutex_lock(&trace_types_lock);
9132
Divya Indie585e642019-08-14 10:55:24 -07009133 ret = -ENODEV;
9134
 9135 /* Make sure the trace array exists before destroying it. */
9136 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9137 if (tr == this_tr) {
9138 ret = __remove_instance(tr);
9139 break;
9140 }
9141 }
Divya Indif45d1222019-03-20 11:28:51 -07009142
9143 mutex_unlock(&trace_types_lock);
9144 mutex_unlock(&event_mutex);
9145
9146 return ret;
9147}
9148EXPORT_SYMBOL_GPL(trace_array_destroy);
9149
9150static int instance_rmdir(const char *name)
9151{
9152 struct trace_array *tr;
9153 int ret;
9154
9155 mutex_lock(&event_mutex);
9156 mutex_lock(&trace_types_lock);
9157
9158 ret = -ENODEV;
Tom Zanussi89c95fc2020-01-29 12:59:21 -06009159 tr = trace_array_find(name);
9160 if (tr)
9161 ret = __remove_instance(tr);
Divya Indif45d1222019-03-20 11:28:51 -07009162
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009163 mutex_unlock(&trace_types_lock);
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04009164 mutex_unlock(&event_mutex);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009165
9166 return ret;
9167}
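/*
 * User-space view of the two callbacks above (tracefs mount point
 * assumed):
 *
 *	mkdir /sys/kernel/tracing/instances/foo		-> instance_mkdir("foo")
 *	rmdir /sys/kernel/tracing/instances/foo		-> instance_rmdir("foo")
 *
 * rmdir fails with -EBUSY while the instance still has users (see
 * __remove_instance() above).
 */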
9168
Steven Rostedt277ba042012-08-03 16:10:49 -04009169static __init void create_trace_instances(struct dentry *d_tracer)
9170{
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09009171 struct trace_array *tr;
9172
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05009173 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9174 instance_mkdir,
9175 instance_rmdir);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05009176 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
Steven Rostedt277ba042012-08-03 16:10:49 -04009177 return;
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09009178
9179 mutex_lock(&event_mutex);
9180 mutex_lock(&trace_types_lock);
9181
9182 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9183 if (!tr->name)
9184 continue;
9185 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9186 "Failed to create instance directory\n"))
9187 break;
9188 }
9189
9190 mutex_unlock(&trace_types_lock);
9191 mutex_unlock(&event_mutex);
Steven Rostedt277ba042012-08-03 16:10:49 -04009192}
9193
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009194static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009195init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009196{
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04009197 struct trace_event_file *file;
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05009198 int cpu;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009199
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05009200 trace_create_file("available_tracers", 0444, d_tracer,
9201 tr, &show_traces_fops);
9202
9203 trace_create_file("current_tracer", 0644, d_tracer,
9204 tr, &set_tracer_fops);
9205
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009206 trace_create_file("tracing_cpumask", 0644, d_tracer,
9207 tr, &tracing_cpumask_fops);
9208
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009209 trace_create_file("trace_options", 0644, d_tracer,
9210 tr, &tracing_iter_fops);
9211
9212 trace_create_file("trace", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02009213 tr, &tracing_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009214
9215 trace_create_file("trace_pipe", 0444, d_tracer,
Oleg Nesterov15544202013-07-23 17:25:57 +02009216 tr, &tracing_pipe_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009217
9218 trace_create_file("buffer_size_kb", 0644, d_tracer,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02009219 tr, &tracing_entries_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009220
9221 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
9222 tr, &tracing_total_entries_fops);
9223
Wang YanQing238ae932013-05-26 16:52:01 +08009224 trace_create_file("free_buffer", 0200, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009225 tr, &tracing_free_buffer_fops);
9226
9227 trace_create_file("trace_marker", 0220, d_tracer,
9228 tr, &tracing_mark_fops);
9229
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04009230 file = __find_event_file(tr, "ftrace", "print");
9231 if (file && file->dir)
9232 trace_create_file("trigger", 0644, file->dir, file,
9233 &event_trigger_fops);
9234 tr->trace_marker_file = file;
9235
Steven Rostedtfa32e852016-07-06 15:25:08 -04009236 trace_create_file("trace_marker_raw", 0220, d_tracer,
9237 tr, &tracing_mark_raw_fops);
9238
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009239 trace_create_file("trace_clock", 0644, d_tracer, tr,
9240 &trace_clock_fops);
9241
9242 trace_create_file("tracing_on", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02009243 tr, &rb_simple_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05009244
Tom Zanussi2c1ea602018-01-15 20:51:41 -06009245 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
9246 &trace_time_stamp_mode_fops);
9247
Steven Rostedt (VMware)a7b1d742018-11-29 22:36:47 -05009248 tr->buffer_percent = 50;
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05009249
9250 trace_create_file("buffer_percent", 0444, d_tracer,
9251 tr, &buffer_percent_fops);
9252
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04009253 create_trace_options_dir(tr);
9254
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04009255#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02009256 trace_create_maxlat_file(tr, d_tracer);
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05009257#endif
9258
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05009259 if (ftrace_create_function_files(tr, d_tracer))
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05009260 MEM_FAIL(1, "Could not allocate function filter files");
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05009261
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05009262#ifdef CONFIG_TRACER_SNAPSHOT
9263 trace_create_file("snapshot", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02009264 tr, &snapshot_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05009265#endif
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05009266
Tom Zanussi8a062902019-03-31 18:48:15 -05009267 trace_create_file("error_log", 0644, d_tracer,
9268 tr, &tracing_err_log_fops);
9269
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05009270 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009271 tracing_init_tracefs_percpu(tr, cpu);
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05009272
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04009273 ftrace_init_tracefs(tr, d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009274}
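/*
 * In short: every trace_array, including each instance created under
 * instances/, gets its own copies of the control files above (trace,
 * trace_pipe, trace_marker, tracing_on, buffer_size_kb, ...), plus a
 * per_cpu/ subtree and an options/ directory.
 */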
9275
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13009276static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009277{
9278 struct vfsmount *mnt;
9279 struct file_system_type *type;
9280
9281 /*
9282 * To maintain backward compatibility for tools that mount
9283 * debugfs to get to the tracing facility, tracefs is automatically
9284 * mounted to the debugfs/tracing directory.
9285 */
9286 type = get_fs_type("tracefs");
9287 if (!type)
9288 return NULL;
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13009289 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009290 put_filesystem(type);
9291 if (IS_ERR(mnt))
9292 return NULL;
9293 mntget(mnt);
9294
9295 return mnt;
9296}
9297
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009298/**
9299 * tracing_init_dentry - initialize top level trace array
9300 *
9301 * This is called when creating files or directories in the tracing
9302 * directory. It is called via fs_initcall() by any of the boot up code
9303 * and expects to return the dentry of the top level tracing directory.
9304 */
Wei Yang22c36b12020-07-12 09:10:36 +08009305int tracing_init_dentry(void)
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009306{
9307 struct trace_array *tr = &global_trace;
9308
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009309 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11009310 pr_warn("Tracing disabled due to lockdown\n");
Wei Yang22c36b12020-07-12 09:10:36 +08009311 return -EPERM;
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009312 }
9313
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009314 /* The top level trace array uses NULL as parent */
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009315 if (tr->dir)
Wei Yang22c36b12020-07-12 09:10:36 +08009316 return 0;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009317
Peter Enderborg072e1332020-07-16 09:15:10 +02009318 if (WARN_ON(!tracefs_initialized()))
Wei Yang22c36b12020-07-12 09:10:36 +08009319 return -ENODEV;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009320
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009321 /*
9322 * As there may still be users that expect the tracing
9323 * files to exist in debugfs/tracing, we must automount
9324 * the tracefs file system there, so older tools still
Ingo Molnarf2cc0202021-03-23 18:49:35 +01009325 * work with the newer kernel.
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009326 */
9327 tr->dir = debugfs_create_automount("tracing", NULL,
9328 trace_automount, NULL);
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009329
Wei Yang22c36b12020-07-12 09:10:36 +08009330 return 0;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009331}
9332
Jeremy Linton00f4b652017-05-31 16:56:43 -05009333extern struct trace_eval_map *__start_ftrace_eval_maps[];
9334extern struct trace_eval_map *__stop_ftrace_eval_maps[];
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009335
Steven Rostedt (VMware)f6a69462020-12-14 21:03:27 -05009336static struct workqueue_struct *eval_map_wq __initdata;
9337static struct work_struct eval_map_work __initdata;
9338
9339static void __init eval_map_work_func(struct work_struct *work)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009340{
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009341 int len;
9342
Jeremy Linton02fd7f62017-05-31 16:56:42 -05009343 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009344 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009345}
9346
Steven Rostedt (VMware)f6a69462020-12-14 21:03:27 -05009347static int __init trace_eval_init(void)
9348{
9349 INIT_WORK(&eval_map_work, eval_map_work_func);
9350
9351 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9352 if (!eval_map_wq) {
9353 pr_err("Unable to allocate eval_map_wq\n");
9354 /* Do work here */
9355 eval_map_work_func(&eval_map_work);
9356 return -ENOMEM;
9357 }
9358
9359 queue_work(eval_map_wq, &eval_map_work);
9360 return 0;
9361}
9362
9363static int __init trace_eval_sync(void)
9364{
9365 /* Make sure the eval map updates are finished */
9366 if (eval_map_wq)
9367 destroy_workqueue(eval_map_wq);
9368 return 0;
9369}
9370
9371late_initcall_sync(trace_eval_sync);
9372
9373
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009374#ifdef CONFIG_MODULES
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009375static void trace_module_add_evals(struct module *mod)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009376{
Jeremy Linton99be6472017-05-31 16:56:44 -05009377 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009378 return;
9379
9380 /*
9381 * Modules with bad taint do not have events created, do
9382 * not bother with enums either.
9383 */
9384 if (trace_module_has_bad_taint(mod))
9385 return;
9386
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009387 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009388}
9389
Jeremy Linton681bec02017-05-31 16:56:53 -05009390#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009391static void trace_module_remove_evals(struct module *mod)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009392{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05009393 union trace_eval_map_item *map;
9394 union trace_eval_map_item **last = &trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009395
Jeremy Linton99be6472017-05-31 16:56:44 -05009396 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009397 return;
9398
Jeremy Linton1793ed92017-05-31 16:56:46 -05009399 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009400
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05009401 map = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009402
9403 while (map) {
9404 if (map->head.mod == mod)
9405 break;
Jeremy Linton5f60b352017-05-31 16:56:47 -05009406 map = trace_eval_jmp_to_tail(map);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009407 last = &map->tail.next;
9408 map = map->tail.next;
9409 }
9410 if (!map)
9411 goto out;
9412
Jeremy Linton5f60b352017-05-31 16:56:47 -05009413 *last = trace_eval_jmp_to_tail(map)->tail.next;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009414 kfree(map);
9415 out:
Jeremy Linton1793ed92017-05-31 16:56:46 -05009416 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009417}
9418#else
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009419static inline void trace_module_remove_evals(struct module *mod) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05009420#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009421
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009422static int trace_module_notify(struct notifier_block *self,
9423 unsigned long val, void *data)
9424{
9425 struct module *mod = data;
9426
9427 switch (val) {
9428 case MODULE_STATE_COMING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009429 trace_module_add_evals(mod);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009430 break;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009431 case MODULE_STATE_GOING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009432 trace_module_remove_evals(mod);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009433 break;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009434 }
9435
Peter Zijlstra0340a6b2020-08-18 15:57:37 +02009436 return NOTIFY_OK;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009437}
9438
9439static struct notifier_block trace_module_nb = {
9440 .notifier_call = trace_module_notify,
9441 .priority = 0,
9442};
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009443#endif /* CONFIG_MODULES */
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009444
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009445static __init int tracer_init_tracefs(void)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009446{
Wei Yang22c36b12020-07-12 09:10:36 +08009447 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009448
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08009449 trace_access_lock_init();
9450
Wei Yang22c36b12020-07-12 09:10:36 +08009451 ret = tracing_init_dentry();
9452 if (ret)
Namhyung Kimed6f1c92013-04-10 09:18:12 +09009453 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009454
Steven Rostedt (VMware)58b92542018-05-08 15:09:27 -04009455 event_trace_init();
9456
Wei Yang22c36b12020-07-12 09:10:36 +08009457 init_tracer_tracefs(&global_trace, NULL);
9458 ftrace_init_tracefs_toplevel(&global_trace, NULL);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009459
Wei Yang22c36b12020-07-12 09:10:36 +08009460 trace_create_file("tracing_thresh", 0644, NULL,
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04009461 &global_trace, &tracing_thresh_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009462
Wei Yang22c36b12020-07-12 09:10:36 +08009463 trace_create_file("README", 0444, NULL,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01009464 NULL, &tracing_readme_fops);
Ingo Molnar7bd2f242008-05-12 21:20:45 +02009465
Wei Yang22c36b12020-07-12 09:10:36 +08009466 trace_create_file("saved_cmdlines", 0444, NULL,
Avadh Patel69abe6a2009-04-10 16:04:48 -04009467 NULL, &tracing_saved_cmdlines_fops);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03009468
Wei Yang22c36b12020-07-12 09:10:36 +08009469 trace_create_file("saved_cmdlines_size", 0644, NULL,
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009470 NULL, &tracing_saved_cmdlines_size_fops);
9471
Wei Yang22c36b12020-07-12 09:10:36 +08009472 trace_create_file("saved_tgids", 0444, NULL,
Michael Sartain99c621d2017-07-05 22:07:15 -06009473 NULL, &tracing_saved_tgids_fops);
9474
Jeremy Linton5f60b352017-05-31 16:56:47 -05009475 trace_eval_init();
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009476
Wei Yang22c36b12020-07-12 09:10:36 +08009477 trace_create_eval_file(NULL);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009478
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009479#ifdef CONFIG_MODULES
9480 register_module_notifier(&trace_module_nb);
9481#endif
9482
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009483#ifdef CONFIG_DYNAMIC_FTRACE
Wei Yang22c36b12020-07-12 09:10:36 +08009484 trace_create_file("dyn_ftrace_total_info", 0444, NULL,
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04009485 NULL, &tracing_dyn_info_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009486#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01009487
Wei Yang22c36b12020-07-12 09:10:36 +08009488 create_trace_instances(NULL);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09009489
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04009490 update_tracer_options(&global_trace);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05009491
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01009492 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009493}
9494
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009495static int trace_panic_handler(struct notifier_block *this,
9496 unsigned long event, void *unused)
9497{
Steven Rostedt944ac422008-10-23 19:26:08 -04009498 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009499 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009500 return NOTIFY_OK;
9501}
9502
9503static struct notifier_block trace_panic_notifier = {
9504 .notifier_call = trace_panic_handler,
9505 .next = NULL,
9506 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9507};
9508
9509static int trace_die_handler(struct notifier_block *self,
9510 unsigned long val,
9511 void *data)
9512{
9513 switch (val) {
9514 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04009515 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009516 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009517 break;
9518 default:
9519 break;
9520 }
9521 return NOTIFY_OK;
9522}
9523
9524static struct notifier_block trace_die_notifier = {
9525 .notifier_call = trace_die_handler,
9526 .priority = 200
9527};
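/*
 * Both notifiers key off ftrace_dump_on_oops, which can be enabled via
 * the "ftrace_dump_on_oops[=orig_cpu]" kernel command line option or
 * the kernel.ftrace_dump_on_oops sysctl.
 */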
9528
9529/*
 9530 * printk is set to a max of 1024; we really don't need it that big.
9531 * Nothing should be printing 1000 characters anyway.
9532 */
9533#define TRACE_MAX_PRINT 1000
9534
9535/*
9536 * Define here KERN_TRACE so that we have one place to modify
9537 * it if we decide to change what log level the ftrace dump
9538 * should be at.
9539 */
Steven Rostedt428aee12009-01-14 12:24:42 -05009540#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009541
Jason Wessel955b61e2010-08-05 09:22:23 -05009542void
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009543trace_printk_seq(struct trace_seq *s)
9544{
9545 /* Probably should print a warning here. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04009546 if (s->seq.len >= TRACE_MAX_PRINT)
9547 s->seq.len = TRACE_MAX_PRINT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009548
Steven Rostedt (Red Hat)820b75f2014-11-19 10:56:41 -05009549 /*
9550 * More paranoid code. Although the buffer size is set to
9551 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9552 * an extra layer of protection.
9553 */
9554 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9555 s->seq.len = s->seq.size - 1;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009556
 9557 /* should be zero terminated, but we are paranoid. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04009558 s->buffer[s->seq.len] = 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009559
9560 printk(KERN_TRACE "%s", s->buffer);
9561
Steven Rostedtf9520752009-03-02 14:04:40 -05009562 trace_seq_init(s);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009563}
9564
Jason Wessel955b61e2010-08-05 09:22:23 -05009565void trace_init_global_iter(struct trace_iterator *iter)
9566{
9567 iter->tr = &global_trace;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009568 iter->trace = iter->tr->current_trace;
Steven Rostedtae3b5092013-01-23 15:22:59 -05009569 iter->cpu_file = RING_BUFFER_ALL_CPUS;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009570 iter->array_buffer = &global_trace.array_buffer;
Cody P Schaferb2f974d2013-10-23 11:49:57 -07009571
9572 if (iter->trace && iter->trace->open)
9573 iter->trace->open(iter);
9574
9575 /* Annotate start of buffers if we had overruns */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009576 if (ring_buffer_overruns(iter->array_buffer->buffer))
Cody P Schaferb2f974d2013-10-23 11:49:57 -07009577 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9578
9579 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9580 if (trace_clocks[iter->tr->clock_id].in_ns)
9581 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
Jason Wessel955b61e2010-08-05 09:22:23 -05009582}
9583
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);
	printk_nmi_direct_enter();

	/* Simulate the iterator */
	trace_init_global_iter(&iter);
	/* Cannot use kmalloc for iter.temp and iter.fmt */
	iter.temp = static_temp_buf;
	iter.temp_size = STATIC_TEMP_BUF_SIZE;
	iter.fmt = static_fmt_buf;
	iter.fmt_size = STATIC_FMT_BUF_SIZE;

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill everything we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	printk_nmi_direct_exit();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

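/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * subsystem dumping the ftrace ring buffer on a fatal error.
 * my_fatal_error() is an assumption for the example; DUMP_ORIG dumps
 * only the CPU that hit the error, DUMP_ALL dumps every CPU.
 *
 *	static void my_fatal_error(const char *reason)
 *	{
 *		pr_emerg("fatal: %s\n", reason);
 *		ftrace_dump(DUMP_ORIG);
 *	}
 */
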
#define WRITE_BUFSIZE 4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = createfn(buf);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}

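/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * createfn callback wired up through a file's write handler.  The
 * names my_create_cmd() and my_write() are assumptions; in-tree
 * users of this helper include the kprobe and uprobe event files.
 * createfn is invoked once per newline-separated line, with '#'
 * comments already stripped; a nonzero return aborts the parse and
 * is propagated back to the writer.
 *
 *	static int my_create_cmd(const char *raw_command)
 *	{
 *		pr_info("parsed: '%s'\n", raw_command);
 *		return 0;
 *	}
 *
 *	static ssize_t my_write(struct file *file, const char __user *ubuf,
 *				size_t count, loff_t *ppos)
 *	{
 *		return trace_parse_run_command(file, ubuf, count, ppos,
 *					       my_create_cmd);
 *	}
 */
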
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callback allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	test_can_verify();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}

void __init trace_init(void)
{
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The buffer holding the default boot tracer's name is in
	 * an init section. This function is called as a late
	 * initcall; if the boot tracer was never registered, clear
	 * the pointer to prevent later registration from accessing
	 * the buffer that is about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static int tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Cannot set tracing clock due to lockdown\n");
			return -EPERM;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}

	return 0;
}
late_initcall_sync(tracing_set_default_clock);
#endif