// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer (such as trace_printk) could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If boot-time tracing including tracers/events via kernel cmdline
 * is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
void __init disable_tracing_selftest(const char *reason)
{
        if (!tracing_selftest_disabled) {
                tracing_selftest_disabled = true;
                pr_info("Ftrace startup test is disabled due to %s\n", reason);
        }
}
#endif

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
        { }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default; you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 to dump the buffers of all CPUs.
 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
        struct module                   *mod;
        unsigned long                   length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
        /*
         * "end" is first and points to NULL as it must be different
         * than "mod" or "eval_string"
         */
        union trace_eval_map_item       *next;
        const char                      *end;   /* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
        struct trace_eval_map           map;
        struct trace_eval_map_head      head;
        struct trace_eval_map_tail      tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
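
/*
 * Illustrative sketch of the layout described above (illustrative
 * only, not a definition used by the code): one saved array, with
 * its two extra elements, looks like:
 *
 *	item[0].head       - count of saved maps and the owning module
 *	item[1..N].map     - the N saved trace_eval_map entries
 *	item[N+1].tail     - tail.next points to the next saved array
 */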

int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
                                   struct trace_buffer *buffer,
                                   unsigned int trace_ctx);

#define MAX_TRACER_SIZE         100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
        strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
        /* We are using ftrace early, expand it */
        ring_buffer_expanded = true;
        return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
        if (*str++ != '=' || !*str) {
                ftrace_dump_on_oops = DUMP_ALL;
                return 1;
        }

        if (!strcmp("orig_cpu", str)) {
                ftrace_dump_on_oops = DUMP_ORIG;
                return 1;
        }

        return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                __disable_trace_on_warning = 1;
        return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
        allocate_snapshot = true;
        /* We also need the main ring buffer expanded */
        ring_buffer_expanded = true;
        return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
        strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
        return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
        strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
        trace_boot_clock = trace_boot_clock_buf;
        return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                tracepoint_printk = 1;
        return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}

static void
trace_process_export(struct trace_export *export,
               struct ring_buffer_event *event, int flag)
{
        struct trace_entry *entry;
        unsigned int size = 0;

        if (export->flags & flag) {
                entry = ring_buffer_event_data(event);
                size = ring_buffer_event_length(event);
                export->write(export, entry, size);
        }
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);

static inline void ftrace_exports_enable(struct trace_export *export)
{
        if (export->flags & TRACE_EXPORT_FUNCTION)
                static_branch_inc(&trace_function_exports_enabled);

        if (export->flags & TRACE_EXPORT_EVENT)
                static_branch_inc(&trace_event_exports_enabled);

        if (export->flags & TRACE_EXPORT_MARKER)
                static_branch_inc(&trace_marker_exports_enabled);
}

static inline void ftrace_exports_disable(struct trace_export *export)
{
        if (export->flags & TRACE_EXPORT_FUNCTION)
                static_branch_dec(&trace_function_exports_enabled);

        if (export->flags & TRACE_EXPORT_EVENT)
                static_branch_dec(&trace_event_exports_enabled);

        if (export->flags & TRACE_EXPORT_MARKER)
                static_branch_dec(&trace_marker_exports_enabled);
}

static void ftrace_exports(struct ring_buffer_event *event, int flag)
{
        struct trace_export *export;

        preempt_disable_notrace();

        export = rcu_dereference_raw_check(ftrace_exports_list);
        while (export) {
                trace_process_export(export, event, flag);
                export = rcu_dereference_raw_check(export->next);
        }

        preempt_enable_notrace();
}

static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
        rcu_assign_pointer(export->next, *list);
        /*
         * We are entering export into the list but another
         * CPU might be walking that list. We need to make sure
         * the export->next pointer is valid before another CPU sees
         * the export pointer inserted into the list.
         */
        rcu_assign_pointer(*list, export);
}

static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
        struct trace_export **p;

        for (p = list; *p != NULL; p = &(*p)->next)
                if (*p == export)
                        break;

        if (*p != export)
                return -1;

        rcu_assign_pointer(*p, (*p)->next);

        return 0;
}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
        ftrace_exports_enable(export);

        add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
        int ret;

        ret = rm_trace_export(list, export);
        ftrace_exports_disable(export);

        return ret;
}

int register_ftrace_export(struct trace_export *export)
{
        if (WARN_ON_ONCE(!export->write))
                return -1;

        mutex_lock(&ftrace_export_lock);

        add_ftrace_export(&ftrace_exports_list, export);

        mutex_unlock(&ftrace_export_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
        int ret;

        mutex_lock(&ftrace_export_lock);

        ret = rm_ftrace_export(&ftrace_exports_list, export);

        mutex_unlock(&ftrace_export_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
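
/*
 * Illustrative sketch (hypothetical module code; the my_* names are
 * made up): an exporter fills in a ->write() callback, invoked as by
 * trace_process_export() above, plus the flags for the trace data it
 * wants, then registers itself:
 *
 *	static void my_export_write(struct trace_export *export,
 *				    void *entry, unsigned int size)
 *	{
 *		// forward 'size' bytes of 'entry' to some sink
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *		.flags	= TRACE_EXPORT_FUNCTION,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 *
 * See linux/trace.h for the exact ->write() prototype.
 */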

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |			\
	 TRACE_ITER_HASH_PTR)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
        .trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
        struct trace_array *tr;
        int ret = -ENODEV;

        mutex_lock(&trace_types_lock);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                if (tr == this_tr) {
                        tr->ref++;
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&trace_types_lock);

        return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
        WARN_ON(!this_tr->ref);
        this_tr->ref--;
}

/**
 * trace_array_put - Decrement the reference counter for this trace array.
 * @this_tr: pointer to the trace array
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 */
void trace_array_put(struct trace_array *this_tr)
{
        if (!this_tr)
                return;

        mutex_lock(&trace_types_lock);
        __trace_array_put(this_tr);
        mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);

int tracing_check_open_get_tr(struct trace_array *tr)
{
        int ret;

        ret = security_locked_down(LOCKDOWN_TRACEFS);
        if (ret)
                return ret;

        if (tracing_disabled)
                return -ENODEV;

        if (tr && trace_array_get(tr) < 0)
                return -ENODEV;

        return 0;
}
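
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * tracefs open callback typically starts with the check above and
 * drops the reference again on release:
 *
 *	ret = tracing_check_open_get_tr(tr);
 *	if (ret)
 *		return ret;
 *	...
 *	trace_array_put(tr);	// in the corresponding release path
 */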

int call_filter_check_discard(struct trace_event_call *call, void *rec,
                              struct trace_buffer *buffer,
                              struct ring_buffer_event *event)
{
        if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
            !filter_match_preds(call->filter, rec)) {
                __trace_event_discard_commit(buffer, event);
                return 1;
        }

        return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
        vfree(pid_list->pids);
        kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
        /*
         * If pid_max changed after filtered_pids was created, we
         * by default ignore all pids greater than the previous pid_max.
         */
        if (search_pid >= filtered_pids->pid_max)
                return false;

        return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids not to be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
                       struct trace_pid_list *filtered_no_pids,
                       struct task_struct *task)
{
        /*
         * If filtered_no_pids is not empty, and the task's pid is listed
         * in filtered_no_pids, then return true.
         * Otherwise, if filtered_pids is empty, that means we can
         * trace all tasks. If it has content, then only trace pids
         * within filtered_pids.
         */

        return (filtered_pids &&
                !trace_find_filtered_pid(filtered_pids, task->pid)) ||
                (filtered_no_pids &&
                trace_find_filtered_pid(filtered_no_pids, task->pid));
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
                                  struct task_struct *self,
                                  struct task_struct *task)
{
        if (!pid_list)
                return;

        /* For forks, we only add if the forking task is listed */
        if (self) {
                if (!trace_find_filtered_pid(pid_list, self->pid))
                        return;
        }

        /* Sorry, but we don't support pid_max changing after setting */
        if (task->pid >= pid_list->pid_max)
                return;

        /* "self" is set for forks, and NULL for exits */
        if (self)
                set_bit(task->pid, pid_list->pids);
        else
                clear_bit(task->pid, pid_list->pids);
}
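
/*
 * Illustrative sketch (not part of this file): per the description
 * above, the fork and exit hooks are the expected callers. On fork
 * the child is added only if the parent is listed; on exit the task
 * is removed:
 *
 *	trace_filter_add_remove_task(pid_list, current, child);	// fork
 *	trace_filter_add_remove_task(pid_list, NULL, task);	// exit
 */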

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
        unsigned long pid = (unsigned long)v;

        (*pos)++;

        /* pid already is +1 of the actual previous bit */
        pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

        /* Return pid + 1 to allow zero to be represented */
        if (pid < pid_list->pid_max)
                return (void *)(pid + 1);

        return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
        unsigned long pid;
        loff_t l = 0;

        pid = find_first_bit(pid_list->pids, pid_list->pid_max);
        if (pid >= pid_list->pid_max)
                return NULL;

        /* Return pid + 1 so that zero can be the exit value */
        for (pid++; pid && l < *pos;
             pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
                ;
        return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
        unsigned long pid = (unsigned long)v - 1;

        seq_printf(m, "%lu\n", pid);
        return 0;
}
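
/*
 * Illustrative sketch (hypothetical wrappers; the my_* names are made
 * up): the three helpers above are shaped to back a seq_file, with
 * thin wrappers that only resolve which trace_pid_list to use:
 *
 *	static const struct seq_operations my_pid_seq_ops = {
 *		.start	= my_pid_start,	// calls trace_pid_start()
 *		.next	= my_pid_next,	// calls trace_pid_next()
 *		.stop	= my_pid_stop,
 *		.show	= trace_pid_show,
 *	};
 */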

/* 128 should be much more than enough */
#define PID_BUF_SIZE            127

int trace_pid_write(struct trace_pid_list *filtered_pids,
                    struct trace_pid_list **new_pid_list,
                    const char __user *ubuf, size_t cnt)
{
        struct trace_pid_list *pid_list;
        struct trace_parser parser;
        unsigned long val;
        int nr_pids = 0;
        ssize_t read = 0;
        ssize_t ret = 0;
        loff_t pos;
        pid_t pid;

        if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
                return -ENOMEM;

        /*
         * Always recreate a new array. The write is an all or nothing
         * operation. Always create a new array when adding new pids by
         * the user. If the operation fails, then the current list is
         * not modified.
         */
        pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
        if (!pid_list) {
                trace_parser_put(&parser);
                return -ENOMEM;
        }

        pid_list->pid_max = READ_ONCE(pid_max);

        /* Only truncating will shrink pid_max */
        if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
                pid_list->pid_max = filtered_pids->pid_max;

        pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
        if (!pid_list->pids) {
                trace_parser_put(&parser);
                kfree(pid_list);
                return -ENOMEM;
        }

        if (filtered_pids) {
                /* copy the current bits to the new max */
                for_each_set_bit(pid, filtered_pids->pids,
                                 filtered_pids->pid_max) {
                        set_bit(pid, pid_list->pids);
                        nr_pids++;
                }
        }

        while (cnt > 0) {

                pos = 0;

                ret = trace_get_user(&parser, ubuf, cnt, &pos);
                if (ret < 0 || !trace_parser_loaded(&parser))
                        break;

                read += ret;
                ubuf += ret;
                cnt -= ret;

                ret = -EINVAL;
                if (kstrtoul(parser.buffer, 0, &val))
                        break;
                if (val >= pid_list->pid_max)
                        break;

                pid = (pid_t)val;

                set_bit(pid, pid_list->pids);
                nr_pids++;

                trace_parser_clear(&parser);
                ret = 0;
        }
        trace_parser_put(&parser);

        if (ret < 0) {
                trace_free_pid_list(pid_list);
                return ret;
        }

        if (!nr_pids) {
                /* Cleared the list of pids */
                trace_free_pid_list(pid_list);
                read = ret;
                pid_list = NULL;
        }

        *new_pid_list = pid_list;

        return read;
}
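
/*
 * Illustrative sketch (hypothetical write handler; the names are made
 * up): trace_pid_write() builds a complete replacement list, so a
 * caller publishes the new list and disposes of the old one only once
 * readers can no longer see it:
 *
 *	ret = trace_pid_write(filtered_pids, &new_list, ubuf, cnt);
 *	if (ret < 0)
 *		return ret;
 *	rcu_assign_pointer(some_tr_field, new_list);
 *	// then free the old list after readers are done, e.g. with
 *	// synchronize_rcu() followed by trace_free_pid_list()
 */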

static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
        u64 ts;

        /* Early boot up does not have a buffer yet */
        if (!buf->buffer)
                return trace_clock_local();

        ts = ring_buffer_time_stamp(buf->buffer);
        ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

        return ts;
}

u64 ftrace_now(int cpu)
{
        return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
        /*
         * For quick access (irqsoff uses this in fast path), just
         * return the mirror variable of the state of the ring buffer.
         * It's a little racy, but we don't really care.
         */
        smp_rmb();
        return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. In any case, this
 * is configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer            *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * Serialize access to the ring buffer.
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different CPU
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                /* gain it for accessing the whole ring buffer. */
                down_write(&all_cpu_access_lock);
        } else {
                /* gain it for accessing a cpu ring buffer. */

                /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
                down_read(&all_cpu_access_lock);

                /* Secondly block other access to this @cpu ring buffer. */
                mutex_lock(&per_cpu(cpu_access_lock, cpu));
        }
}

static inline void trace_access_unlock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                up_write(&all_cpu_access_lock);
        } else {
                mutex_unlock(&per_cpu(cpu_access_lock, cpu));
                up_read(&all_cpu_access_lock);
        }
}

static inline void trace_access_lock_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
        (void)cpu;
        mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
        (void)cpu;
        mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
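
/*
 * Illustrative sketch (not part of this file): a reader consuming one
 * CPU buffer, or all of them with RING_BUFFER_ALL_CPUS, brackets the
 * access with the primitives above:
 *
 *	trace_access_lock(cpu);
 *	... read or consume events of @cpu ...
 *	trace_access_unlock(cpu);
 *
 * trace_access_lock_init() must have run once beforehand so the
 * per-cpu mutexes are initialized.
 */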

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
                                 unsigned int trace_ctx,
                                 int skip, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct trace_buffer *buffer,
                                      unsigned int trace_ctx,
                                      int skip, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
                                        unsigned int trace_ctx,
                                        int skip, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
                                      struct trace_buffer *buffer,
                                      unsigned int trace_ctx,
                                      int skip, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
                  int type, unsigned int trace_ctx)
{
        struct trace_entry *ent = ring_buffer_event_data(event);

        tracing_generic_entry_update(ent, type, trace_ctx);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
                          int type,
                          unsigned long len,
                          unsigned int trace_ctx)
{
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(buffer, len);
        if (event != NULL)
                trace_event_setup(event, type, trace_ctx);

        return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
        if (tr->array_buffer.buffer)
                ring_buffer_record_on(tr->array_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff) that just want to
         * know if the ring buffer has been disabled and can handle
         * races where it gets disabled while we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 0;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
        tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

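/*
 * Illustrative sketch (not part of this file): a common debugging
 * pattern is to stop the buffers right where a problem is detected,
 * so the trace ends at the point of interest:
 *
 *	if (suspicious_condition)
 *		tracing_off();
 *	...
 *	tracing_on();	// re-arm tracing once done inspecting
 */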

static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
        __this_cpu_write(trace_taskinfo_save, true);

        /* If this is the temp buffer, we need to commit fully */
        if (this_cpu_read(trace_buffered_event) == event) {
                /* Length is in event->array[0] */
                ring_buffer_write(buffer, event->array[0], &event->array[1]);
                /* Release the temp buffer */
                this_cpu_dec(trace_buffered_event_cnt);
        } else
                ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:    The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
        struct ring_buffer_event *event;
        struct trace_buffer *buffer;
        struct print_entry *entry;
        unsigned int trace_ctx;
        int alloc;

        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
                return 0;

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        alloc = sizeof(*entry) + size + 2; /* possible \n added */

        trace_ctx = tracing_gen_ctx();
        buffer = global_trace.array_buffer.buffer;
        ring_buffer_nest_start(buffer);
        event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
                                            trace_ctx);
        if (!event) {
                size = 0;
                goto out;
        }

        entry = ring_buffer_event_data(event);
        entry->ip = ip;

        memcpy(&entry->buf, str, size);

        /* Add a newline if necessary */
        if (entry->buf[size - 1] != '\n') {
                entry->buf[size] = '\n';
                entry->buf[size + 1] = '\0';
        } else
                entry->buf[size] = '\0';

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
 out:
        ring_buffer_nest_end(buffer);
        return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:    The address of the caller
 * @str:   The constant string to write to the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
        struct ring_buffer_event *event;
        struct trace_buffer *buffer;
        struct bputs_entry *entry;
        unsigned int trace_ctx;
        int size = sizeof(struct bputs_entry);
        int ret = 0;

        if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
                return 0;

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        trace_ctx = tracing_gen_ctx();
        buffer = global_trace.array_buffer.buffer;

        ring_buffer_nest_start(buffer);
        event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
                                            trace_ctx);
        if (!event)
                goto out;

        entry = ring_buffer_event_data(event);
        entry->ip = ip;
        entry->str = str;

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);

        ret = 1;
 out:
        ring_buffer_nest_end(buffer);
        return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
                                           void *cond_data)
{
        struct tracer *tracer = tr->current_trace;
        unsigned long flags;

        if (in_nmi()) {
                internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
                internal_trace_puts("*** snapshot is being ignored        ***\n");
                return;
        }

        if (!tr->allocated_snapshot) {
                internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
                internal_trace_puts("*** stopping trace here!   ***\n");
                tracing_off();
                return;
        }

        /* Note, snapshot can not be used when the tracer uses it */
        if (tracer->use_max_tr) {
                internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
                internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
                return;
        }

        local_irq_save(flags);
        update_max_tr(tr, current, smp_processor_id(), cond_data);
        local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
        tracing_snapshot_instance_cond(tr, NULL);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
        struct trace_array *tr = &global_trace;

        tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr:		The tracing instance to snapshot
 * @cond_data:	The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
        tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr:		The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
        void *cond_data = NULL;

        arch_spin_lock(&tr->max_lock);

        if (tr->cond_snapshot)
                cond_data = tr->cond_snapshot->cond_data;

        arch_spin_unlock(&tr->max_lock);

        return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);

static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
                                        struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
        int ret;

        if (!tr->allocated_snapshot) {

                /* allocate spare buffer */
                ret = resize_buffer_duplicate_size(&tr->max_buffer,
                                   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
                if (ret < 0)
                        return ret;

                tr->allocated_snapshot = true;
        }

        return 0;
}

static void free_snapshot(struct trace_array *tr)
{
        /*
         * We don't free the ring buffer; instead, we resize it because
         * the max_tr ring buffer has some state (e.g. ring->clock) and
         * we want to preserve it.
         */
        ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
        set_buffer_entries(&tr->max_buffer, 1);
        tracing_reset_online_cpus(&tr->max_buffer);
        tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
        struct trace_array *tr = &global_trace;
        int ret;

        ret = tracing_alloc_snapshot_instance(tr);
        WARN_ON(ret < 0);

        return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
        int ret;

        ret = tracing_alloc_snapshot();
        if (ret < 0)
                return;

        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
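
/*
 * Illustrative sketch (not part of this file): allocate the snapshot
 * buffer once from a context that may sleep, then trigger snapshots
 * from wherever the interesting condition fires, even if that place
 * cannot sleep:
 *
 *	tracing_snapshot_alloc();	// may sleep; do this early
 *	...
 *	if (interesting_condition)
 *		tracing_snapshot();	// does not sleep
 */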
Tom Zanussia35873a2019-02-13 17:42:45 -06001283
1284/**
1285 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1286 * @tr: The tracing instance
1287 * @cond_data: User data to associate with the snapshot
1288 * @update: Implementation of the cond_snapshot update function
1289 *
1290 * Check whether the conditional snapshot for the given instance has
1291 * already been enabled, or if the current tracer is already using a
1292 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1293 * save the cond_data and update function inside.
1294 *
1295 * Returns 0 if successful, error otherwise.
1296 */
1297int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1298 cond_update_fn_t update)
1299{
1300 struct cond_snapshot *cond_snapshot;
1301 int ret = 0;
1302
1303 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1304 if (!cond_snapshot)
1305 return -ENOMEM;
1306
1307 cond_snapshot->cond_data = cond_data;
1308 cond_snapshot->update = update;
1309
1310 mutex_lock(&trace_types_lock);
1311
1312 ret = tracing_alloc_snapshot_instance(tr);
1313 if (ret)
1314 goto fail_unlock;
1315
1316 if (tr->current_trace->use_max_tr) {
1317 ret = -EBUSY;
1318 goto fail_unlock;
1319 }
1320
Steven Rostedt (VMware)1c347a92019-02-14 18:45:21 -05001321 /*
1322 * The cond_snapshot can only change to NULL without the
1323 * trace_types_lock. We don't care if we race with it going
1324 * to NULL, but we want to make sure that it's not set to
1325 * something other than NULL when we get here, which we can
1326 * do safely with only holding the trace_types_lock and not
1327 * having to take the max_lock.
1328 */
Tom Zanussia35873a2019-02-13 17:42:45 -06001329 if (tr->cond_snapshot) {
1330 ret = -EBUSY;
1331 goto fail_unlock;
1332 }
1333
1334 arch_spin_lock(&tr->max_lock);
1335 tr->cond_snapshot = cond_snapshot;
1336 arch_spin_unlock(&tr->max_lock);
1337
1338 mutex_unlock(&trace_types_lock);
1339
1340 return ret;
1341
1342 fail_unlock:
1343 mutex_unlock(&trace_types_lock);
1344 kfree(cond_snapshot);
1345 return ret;
1346}
1347EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
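/*
 * Illustrative sketch, not part of this file: a hypothetical user of
 * the conditional snapshot API. The update callback runs under
 * tr->max_lock at snapshot time and decides whether the buffers are
 * actually swapped; its cond_data argument is whatever the
 * tracing_snapshot_cond() caller passed in, while the data registered
 * here is retrieved with tracing_cond_snapshot_data().
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		unsigned long *threshold = tracing_cond_snapshot_data(tr);
 *		unsigned long *val = cond_data;
 *
 *		return *val > *threshold;
 *	}
 *
 *	err = tracing_snapshot_cond_enable(tr, &my_threshold, my_update);
 */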
1348
1349/**
1350 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1351 * @tr: The tracing instance
1352 *
1353 * Check whether the conditional snapshot for the given instance is
1354 * enabled; if so, free the cond_snapshot associated with it,
1355 * otherwise return -EINVAL.
1356 *
1357 * Returns 0 if successful, error otherwise.
1358 */
1359int tracing_snapshot_cond_disable(struct trace_array *tr)
1360{
1361 int ret = 0;
1362
1363 arch_spin_lock(&tr->max_lock);
1364
1365 if (!tr->cond_snapshot)
1366 ret = -EINVAL;
1367 else {
1368 kfree(tr->cond_snapshot);
1369 tr->cond_snapshot = NULL;
1370 }
1371
1372 arch_spin_unlock(&tr->max_lock);
1373
1374 return ret;
1375}
1376EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001377#else
1378void tracing_snapshot(void)
1379{
1380 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1381}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001382EXPORT_SYMBOL_GPL(tracing_snapshot);
Tom Zanussia35873a2019-02-13 17:42:45 -06001383void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1384{
1385 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1386}
1387EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
Tom Zanussi93e31ff2013-10-24 08:59:26 -05001388int tracing_alloc_snapshot(void)
1389{
1390 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1391 return -ENODEV;
1392}
1393EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001394void tracing_snapshot_alloc(void)
1395{
1396 /* Give warning */
1397 tracing_snapshot();
1398}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001399EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Tom Zanussia35873a2019-02-13 17:42:45 -06001400void *tracing_cond_snapshot_data(struct trace_array *tr)
1401{
1402 return NULL;
1403}
1404EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1405int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1406{
1407 return -ENODEV;
1408}
1409EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1410int tracing_snapshot_cond_disable(struct trace_array *tr)
1411{
 1412 return -ENODEV;
1413}
1414EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001415#endif /* CONFIG_TRACER_SNAPSHOT */
1416
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -04001417void tracer_tracing_off(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001418{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001419 if (tr->array_buffer.buffer)
1420 ring_buffer_record_off(tr->array_buffer.buffer);
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001421 /*
1422 * This flag is looked at when buffers haven't been allocated
 1423 * yet, or by some tracers (like irqsoff) that just want to
 1424 * know if the ring buffer has been disabled, but it can handle
 1425 * races where it gets disabled but we still do a record.
1426 * As the check is in the fast path of the tracers, it is more
1427 * important to be fast than accurate.
1428 */
1429 tr->buffer_disabled = 1;
1430 /* Make the flag seen by readers */
1431 smp_wmb();
1432}
1433
Steven Rostedt499e5472012-02-22 15:50:28 -05001434/**
1435 * tracing_off - turn off tracing buffers
1436 *
1437 * This function stops the tracing buffers from recording data.
1438 * It does not disable any overhead the tracers themselves may
1439 * be causing. This function simply causes all recording to
1440 * the ring buffers to fail.
1441 */
1442void tracing_off(void)
1443{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001444 tracer_tracing_off(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001445}
1446EXPORT_SYMBOL_GPL(tracing_off);
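/*
 * Illustrative sketch, not part of this file: tracing_off() is often
 * called right after detecting a condition of interest, so the ring
 * buffer stops overwriting the events that led up to it.
 * my_error_detected() is a hypothetical check.
 *
 *	if (my_error_detected()) {
 *		trace_printk("error hit, freezing trace\n");
 *		tracing_off();
 *	}
 */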
1447
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001448void disable_trace_on_warning(void)
1449{
Steven Rostedt (VMware)c2007842020-05-29 10:46:32 -04001450 if (__disable_trace_on_warning) {
1451 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1452 "Disabling tracing due to warning\n");
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001453 tracing_off();
Steven Rostedt (VMware)c2007842020-05-29 10:46:32 -04001454 }
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001455}
1456
Steven Rostedt499e5472012-02-22 15:50:28 -05001457/**
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001458 * tracer_tracing_is_on - show the real enabled state of the ring buffer
 1459 * @tr: the trace array to check
 1460 *
 1461 * Shows the real state of the ring buffer: whether it is enabled or not.
1462 */
Steven Rostedt (VMware)ec573502018-08-01 16:08:57 -04001463bool tracer_tracing_is_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001464{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001465 if (tr->array_buffer.buffer)
1466 return ring_buffer_record_is_on(tr->array_buffer.buffer);
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001467 return !tr->buffer_disabled;
1468}
1469
Steven Rostedt499e5472012-02-22 15:50:28 -05001470/**
 1471 * tracing_is_on - show whether the global ring buffers are enabled
1472 */
1473int tracing_is_on(void)
1474{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001475 return tracer_tracing_is_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001476}
1477EXPORT_SYMBOL_GPL(tracing_is_on);
1478
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001479static int __init set_buf_size(char *str)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001480{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001481 unsigned long buf_size;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001482
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001483 if (!str)
1484 return 0;
Li Zefan9d612be2009-06-24 17:33:15 +08001485 buf_size = memparse(str, &str);
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001486 /* nr_entries cannot be zero */
Li Zefan9d612be2009-06-24 17:33:15 +08001487 if (buf_size == 0)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001488 return 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001489 trace_buf_size = buf_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001490 return 1;
1491}
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001492__setup("trace_buf_size=", set_buf_size);
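/*
 * Boot parameter example: "trace_buf_size=65536" sets the size of
 * each per-cpu ring buffer in bytes; since memparse() is used above,
 * size suffixes also work, e.g. "trace_buf_size=16M".
 */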
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001493
Tim Bird0e950172010-02-25 15:36:43 -08001494static int __init set_tracing_thresh(char *str)
1495{
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001496 unsigned long threshold;
Tim Bird0e950172010-02-25 15:36:43 -08001497 int ret;
1498
1499 if (!str)
1500 return 0;
Daniel Walterbcd83ea2012-09-26 22:08:38 +02001501 ret = kstrtoul(str, 0, &threshold);
Tim Bird0e950172010-02-25 15:36:43 -08001502 if (ret < 0)
1503 return 0;
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001504 tracing_thresh = threshold * 1000;
Tim Bird0e950172010-02-25 15:36:43 -08001505 return 1;
1506}
1507__setup("tracing_thresh=", set_tracing_thresh);
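/*
 * Boot parameter example: "tracing_thresh=100" makes the latency
 * tracers record only traces longer than 100 microseconds; the value
 * is kept internally in nanoseconds, hence the multiply by 1000 above.
 */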
1508
Steven Rostedt57f50be2008-05-12 21:20:44 +02001509unsigned long nsecs_to_usecs(unsigned long nsecs)
1510{
1511 return nsecs / 1000;
1512}
1513
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001514/*
1515 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001516 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001517 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001518 * of strings in the order that the evals (enum) were defined.
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001519 */
1520#undef C
1521#define C(a, b) b
1522
Ingo Molnarf2cc0202021-03-23 18:49:35 +01001523/* These must match the bit positions in trace_iterator_flags */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001524static const char *trace_options[] = {
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001525 TRACE_FLAGS
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001526 NULL
1527};
1528
Zhaolei5079f322009-08-25 16:12:56 +08001529static struct {
1530 u64 (*func)(void);
1531 const char *name;
David Sharp8be07092012-11-13 12:18:22 -08001532 int in_ns; /* is this clock in nanoseconds? */
Zhaolei5079f322009-08-25 16:12:56 +08001533} trace_clocks[] = {
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001534 { trace_clock_local, "local", 1 },
1535 { trace_clock_global, "global", 1 },
1536 { trace_clock_counter, "counter", 0 },
Linus Torvaldse7fda6c2014-08-05 17:46:42 -07001537 { trace_clock_jiffies, "uptime", 0 },
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001538 { trace_clock, "perf", 1 },
1539 { ktime_get_mono_fast_ns, "mono", 1 },
Drew Richardsonaabfa5f2015-05-08 07:30:39 -07001540 { ktime_get_raw_fast_ns, "mono_raw", 1 },
Thomas Gleixnera3ed0e432018-04-25 15:33:38 +02001541 { ktime_get_boot_fast_ns, "boot", 1 },
David Sharp8cbd9cc2012-11-13 12:18:21 -08001542 ARCH_TRACE_CLOCKS
Zhaolei5079f322009-08-25 16:12:56 +08001543};
1544
Tom Zanussi860f9f62018-01-15 20:51:48 -06001545bool trace_clock_in_ns(struct trace_array *tr)
1546{
1547 if (trace_clocks[tr->clock_id].in_ns)
1548 return true;
1549
1550 return false;
1551}
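/*
 * Usage note: the names in trace_clocks[] above are the accepted
 * values for the tracefs trace_clock file, e.g.:
 *
 *	# echo mono > /sys/kernel/tracing/trace_clock
 *
 * "counter" and "uptime" are the two clocks whose timestamps are not
 * in nanoseconds, which is what trace_clock_in_ns() reports.
 */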
1552
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001553/*
 1554 * trace_parser_get_init - allocates the buffer for the trace parser
1555 */
1556int trace_parser_get_init(struct trace_parser *parser, int size)
1557{
1558 memset(parser, 0, sizeof(*parser));
1559
1560 parser->buffer = kmalloc(size, GFP_KERNEL);
1561 if (!parser->buffer)
1562 return 1;
1563
1564 parser->size = size;
1565 return 0;
1566}
1567
1568/*
1569 * trace_parser_put - frees the buffer for trace parser
1570 */
1571void trace_parser_put(struct trace_parser *parser)
1572{
1573 kfree(parser->buffer);
Steven Rostedt (VMware)0e684b62017-02-02 17:58:18 -05001574 parser->buffer = NULL;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001575}
1576
1577/*
1578 * trace_get_user - reads the user input string separated by space
1579 * (matched by isspace(ch))
1580 *
1581 * For each string found the 'struct trace_parser' is updated,
1582 * and the function returns.
1583 *
1584 * Returns number of bytes read.
1585 *
1586 * See kernel/trace/trace.h for 'struct trace_parser' details.
1587 */
1588int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1589 size_t cnt, loff_t *ppos)
1590{
1591 char ch;
1592 size_t read = 0;
1593 ssize_t ret;
1594
1595 if (!*ppos)
1596 trace_parser_clear(parser);
1597
1598 ret = get_user(ch, ubuf++);
1599 if (ret)
1600 goto out;
1601
1602 read++;
1603 cnt--;
1604
1605 /*
1606 * The parser is not finished with the last write,
1607 * continue reading the user input without skipping spaces.
1608 */
1609 if (!parser->cont) {
1610 /* skip white space */
1611 while (cnt && isspace(ch)) {
1612 ret = get_user(ch, ubuf++);
1613 if (ret)
1614 goto out;
1615 read++;
1616 cnt--;
1617 }
1618
Changbin Du76638d92018-01-16 17:02:29 +08001619 parser->idx = 0;
1620
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001621 /* only spaces were written */
Changbin Du921a7ac2018-01-16 17:02:28 +08001622 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001623 *ppos += read;
1624 ret = read;
1625 goto out;
1626 }
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001627 }
1628
1629 /* read the non-space input */
Changbin Du921a7ac2018-01-16 17:02:28 +08001630 while (cnt && !isspace(ch) && ch) {
Li Zefan3c235a32009-09-22 13:51:54 +08001631 if (parser->idx < parser->size - 1)
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001632 parser->buffer[parser->idx++] = ch;
1633 else {
1634 ret = -EINVAL;
1635 goto out;
1636 }
1637 ret = get_user(ch, ubuf++);
1638 if (ret)
1639 goto out;
1640 read++;
1641 cnt--;
1642 }
1643
 1644 /* Either the input is finished or we have to wait for another call. */
Changbin Du921a7ac2018-01-16 17:02:28 +08001645 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001646 parser->buffer[parser->idx] = 0;
1647 parser->cont = false;
Steven Rostedt057db842013-10-09 22:23:23 -04001648 } else if (parser->idx < parser->size - 1) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001649 parser->cont = true;
1650 parser->buffer[parser->idx++] = ch;
Changbin Duf4d07062018-01-16 17:02:30 +08001651 /* Make sure the parsed string always terminates with '\0'. */
1652 parser->buffer[parser->idx] = 0;
Steven Rostedt057db842013-10-09 22:23:23 -04001653 } else {
1654 ret = -EINVAL;
1655 goto out;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001656 }
1657
1658 *ppos += read;
1659 ret = read;
1660
1661out:
1662 return ret;
1663}
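/*
 * Behaviour sketch for trace_get_user(): writing "foo bar" returns
 * after consuming "foo" plus the trailing space, with parser->cont
 * false; if the input chunk ends mid-word (say the user's write was
 * split after "ba"), parser->cont is left true so the next call
 * appends to the same buffer instead of skipping leading spaces.
 */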
1664
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001665/* TODO add a seq_buf_to_buffer() */
Dmitri Vorobievb8b94262009-03-22 19:11:11 +02001666static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001667{
1668 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001669
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001670 if (trace_seq_used(s) <= s->seq.readpos)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001671 return -EBUSY;
1672
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001673 len = trace_seq_used(s) - s->seq.readpos;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001674 if (cnt > len)
1675 cnt = len;
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001676 memcpy(buf, s->buffer + s->seq.readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001677
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001678 s->seq.readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001679 return cnt;
1680}
1681
Tim Bird0e950172010-02-25 15:36:43 -08001682unsigned long __read_mostly tracing_thresh;
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001683static const struct file_operations tracing_max_lat_fops;
1684
1685#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1686 defined(CONFIG_FSNOTIFY)
1687
1688static struct workqueue_struct *fsnotify_wq;
1689
1690static void latency_fsnotify_workfn(struct work_struct *work)
1691{
1692 struct trace_array *tr = container_of(work, struct trace_array,
1693 fsnotify_work);
Amir Goldstein82ace1e2020-07-22 15:58:44 +03001694 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001695}
1696
1697static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1698{
1699 struct trace_array *tr = container_of(iwork, struct trace_array,
1700 fsnotify_irqwork);
1701 queue_work(fsnotify_wq, &tr->fsnotify_work);
1702}
1703
1704static void trace_create_maxlat_file(struct trace_array *tr,
1705 struct dentry *d_tracer)
1706{
1707 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1708 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1709 tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
1710 d_tracer, &tr->max_latency,
1711 &tracing_max_lat_fops);
1712}
1713
1714__init static int latency_fsnotify_init(void)
1715{
1716 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1717 WQ_UNBOUND | WQ_HIGHPRI, 0);
1718 if (!fsnotify_wq) {
1719 pr_err("Unable to allocate tr_max_lat_wq\n");
1720 return -ENOMEM;
1721 }
1722 return 0;
1723}
1724
1725late_initcall_sync(latency_fsnotify_init);
1726
1727void latency_fsnotify(struct trace_array *tr)
1728{
1729 if (!fsnotify_wq)
1730 return;
1731 /*
1732 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1733 * possible that we are called from __schedule() or do_idle(), which
1734 * could cause a deadlock.
1735 */
1736 irq_work_queue(&tr->fsnotify_irqwork);
1737}
1738
1739/*
1740 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1741 * defined(CONFIG_FSNOTIFY)
1742 */
1743#else
1744
1745#define trace_create_maxlat_file(tr, d_tracer) \
1746 trace_create_file("tracing_max_latency", 0644, d_tracer, \
1747 &tr->max_latency, &tracing_max_lat_fops)
1748
1749#endif
Tim Bird0e950172010-02-25 15:36:43 -08001750
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001751#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001752/*
1753 * Copy the new maximum trace into the separate maximum-trace
1754 * structure. (this way the maximum trace is permanently saved,
Chunyu Hu5a93bae2017-10-19 14:32:33 +08001755 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001756 */
1757static void
1758__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1759{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001760 struct array_buffer *trace_buf = &tr->array_buffer;
1761 struct array_buffer *max_buf = &tr->max_buffer;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001762 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1763 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001764
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001765 max_buf->cpu = cpu;
1766 max_buf->time_start = data->preempt_timestamp;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001767
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05001768 max_data->saved_latency = tr->max_latency;
Steven Rostedt8248ac02009-09-02 12:27:41 -04001769 max_data->critical_start = data->critical_start;
1770 max_data->critical_end = data->critical_end;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001771
Tom Zanussi85f726a2019-03-05 10:12:00 -06001772 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
Steven Rostedt8248ac02009-09-02 12:27:41 -04001773 max_data->pid = tsk->pid;
Steven Rostedt (Red Hat)f17a5192013-05-30 21:10:37 -04001774 /*
1775 * If tsk == current, then use current_uid(), as that does not use
1776 * RCU. The irq tracer can be called out of RCU scope.
1777 */
1778 if (tsk == current)
1779 max_data->uid = current_uid();
1780 else
1781 max_data->uid = task_uid(tsk);
1782
Steven Rostedt8248ac02009-09-02 12:27:41 -04001783 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1784 max_data->policy = tsk->policy;
1785 max_data->rt_priority = tsk->rt_priority;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001786
1787 /* record this tasks comm */
1788 tracing_record_cmdline(tsk);
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001789 latency_fsnotify(tr);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001790}
1791
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001792/**
1793 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1794 * @tr: tracer
1795 * @tsk: the task with the latency
1796 * @cpu: The cpu that initiated the trace.
Tom Zanussia35873a2019-02-13 17:42:45 -06001797 * @cond_data: User data associated with a conditional snapshot
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001798 *
1799 * Flip the buffers between the @tr and the max_tr and record information
1800 * about which task was the cause of this latency.
1801 */
Ingo Molnare309b412008-05-12 21:20:51 +02001802void
Tom Zanussia35873a2019-02-13 17:42:45 -06001803update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1804 void *cond_data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001805{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001806 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001807 return;
1808
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001809 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt34600f02013-01-22 13:35:11 -05001810
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05001811 if (!tr->allocated_snapshot) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001812 /* Only the nop tracer should hit this when disabling */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001813 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001814 return;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001815 }
Steven Rostedt34600f02013-01-22 13:35:11 -05001816
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001817 arch_spin_lock(&tr->max_lock);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001818
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001819 /* Inherit the recordable setting from array_buffer */
1820 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
Masami Hiramatsu73c8d892018-07-14 01:28:15 +09001821 ring_buffer_record_on(tr->max_buffer.buffer);
1822 else
1823 ring_buffer_record_off(tr->max_buffer.buffer);
1824
Tom Zanussia35873a2019-02-13 17:42:45 -06001825#ifdef CONFIG_TRACER_SNAPSHOT
1826 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1827 goto out_unlock;
1828#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001829 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001830
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001831 __update_max_tr(tr, tsk, cpu);
Tom Zanussia35873a2019-02-13 17:42:45 -06001832
1833 out_unlock:
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001834 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001835}
1836
1837/**
1838 * update_max_tr_single - only copy one trace over, and reset the rest
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07001839 * @tr: tracer
1840 * @tsk: task with the latency
1841 * @cpu: the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001842 *
1843 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001844 */
Ingo Molnare309b412008-05-12 21:20:51 +02001845void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001846update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1847{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001848 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001849
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001850 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001851 return;
1852
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001853 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001854 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001855 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001856 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001857 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001858 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001859
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001860 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001861
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001862 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001863
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001864 if (ret == -EBUSY) {
1865 /*
1866 * We failed to swap the buffer due to a commit taking
1867 * place on this CPU. We fail to record, but we reset
1868 * the max trace buffer (no one writes directly to it)
1869 * and flag that it failed.
1870 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001871 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001872 "Failed to swap buffers due to commit in progress\n");
1873 }
1874
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001875 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001876
1877 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001878 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001879}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001880#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001881
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05001882static int wait_on_pipe(struct trace_iterator *iter, int full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001883{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001884 /* Iterators are static, they should be filled or empty */
1885 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001886 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001887
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001888 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
Rabin Vincente30f53a2014-11-10 19:46:34 +01001889 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001890}
1891
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001892#ifdef CONFIG_FTRACE_STARTUP_TEST
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001893static bool selftests_can_run;
1894
1895struct trace_selftests {
1896 struct list_head list;
1897 struct tracer *type;
1898};
1899
1900static LIST_HEAD(postponed_selftests);
1901
1902static int save_selftest(struct tracer *type)
1903{
1904 struct trace_selftests *selftest;
1905
1906 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1907 if (!selftest)
1908 return -ENOMEM;
1909
1910 selftest->type = type;
1911 list_add(&selftest->list, &postponed_selftests);
1912 return 0;
1913}
1914
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001915static int run_tracer_selftest(struct tracer *type)
1916{
1917 struct trace_array *tr = &global_trace;
1918 struct tracer *saved_tracer = tr->current_trace;
1919 int ret;
1920
1921 if (!type->selftest || tracing_selftest_disabled)
1922 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001923
1924 /*
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001925 * If a tracer registers early in boot up (before scheduling is
1926 * initialized and such), then do not run its selftests yet.
1927 * Instead, run it a little later in the boot process.
1928 */
1929 if (!selftests_can_run)
1930 return save_selftest(type);
1931
Steven Rostedt (VMware)ee666a12021-03-01 10:49:35 -05001932 if (!tracing_is_on()) {
1933 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
1934 type->name);
1935 return 0;
1936 }
1937
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001938 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001939 * Run a selftest on this tracer.
1940 * Here we reset the trace buffer, and set the current
1941 * tracer to be this tracer. The tracer can then run some
1942 * internal tracing to verify that everything is in order.
1943 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001944 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001945 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001946
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001947 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001948
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001949#ifdef CONFIG_TRACER_MAX_TRACE
1950 if (type->use_max_tr) {
1951 /* If we expanded the buffers, make sure the max is expanded too */
1952 if (ring_buffer_expanded)
1953 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1954 RING_BUFFER_ALL_CPUS);
1955 tr->allocated_snapshot = true;
1956 }
1957#endif
1958
1959 /* the test is responsible for initializing and enabling */
1960 pr_info("Testing tracer %s: ", type->name);
1961 ret = type->selftest(type, tr);
1962 /* the test is responsible for resetting too */
1963 tr->current_trace = saved_tracer;
1964 if (ret) {
1965 printk(KERN_CONT "FAILED!\n");
1966 /* Add the warning after printing 'FAILED' */
1967 WARN_ON(1);
1968 return -1;
1969 }
1970 /* Only reset on passing, to avoid touching corrupted buffers */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001971 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001972
1973#ifdef CONFIG_TRACER_MAX_TRACE
1974 if (type->use_max_tr) {
1975 tr->allocated_snapshot = false;
1976
1977 /* Shrink the max buffer again */
1978 if (ring_buffer_expanded)
1979 ring_buffer_resize(tr->max_buffer.buffer, 1,
1980 RING_BUFFER_ALL_CPUS);
1981 }
1982#endif
1983
1984 printk(KERN_CONT "PASSED\n");
1985 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001986}
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001987
1988static __init int init_trace_selftests(void)
1989{
1990 struct trace_selftests *p, *n;
1991 struct tracer *t, **last;
1992 int ret;
1993
1994 selftests_can_run = true;
1995
1996 mutex_lock(&trace_types_lock);
1997
1998 if (list_empty(&postponed_selftests))
1999 goto out;
2000
2001 pr_info("Running postponed tracer tests:\n");
2002
Steven Rostedt (VMware)78041c02020-02-20 15:38:01 -05002003 tracing_selftest_running = true;
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04002004 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
Anders Roxell6fc21712018-11-30 15:56:22 +01002005 /* This loop can take minutes when sanitizers are enabled, so
 2006 * let's make sure we allow RCU processing.
2007 */
2008 cond_resched();
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04002009 ret = run_tracer_selftest(p->type);
2010 /* If the test fails, then warn and remove from available_tracers */
2011 if (ret < 0) {
2012 WARN(1, "tracer: %s failed selftest, disabling\n",
2013 p->type->name);
2014 last = &trace_types;
2015 for (t = trace_types; t; t = t->next) {
2016 if (t == p->type) {
2017 *last = t->next;
2018 break;
2019 }
2020 last = &t->next;
2021 }
2022 }
2023 list_del(&p->list);
2024 kfree(p);
2025 }
Steven Rostedt (VMware)78041c02020-02-20 15:38:01 -05002026 tracing_selftest_running = false;
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04002027
2028 out:
2029 mutex_unlock(&trace_types_lock);
2030
2031 return 0;
2032}
Steven Rostedtb9ef0322017-05-17 11:14:35 -04002033core_initcall(init_trace_selftests);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05002034#else
2035static inline int run_tracer_selftest(struct tracer *type)
2036{
2037 return 0;
2038}
2039#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002040
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04002041static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2042
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08002043static void __init apply_trace_boot_options(void);
2044
Steven Rostedt4fcdae82008-05-12 21:21:00 +02002045/**
2046 * register_tracer - register a tracer with the ftrace system.
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002047 * @type: the plugin for the tracer
Steven Rostedt4fcdae82008-05-12 21:21:00 +02002048 *
2049 * Register a new plugin tracer.
2050 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08002051int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002052{
2053 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002054 int ret = 0;
2055
2056 if (!type->name) {
2057 pr_info("Tracer must have a name\n");
2058 return -1;
2059 }
2060
Dan Carpenter24a461d2010-07-10 12:06:44 +02002061 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08002062 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2063 return -1;
2064 }
2065
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05002066 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11002067 pr_warn("Can not register tracer %s due to lockdown\n",
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05002068 type->name);
2069 return -EPERM;
2070 }
2071
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002072 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01002073
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01002074 tracing_selftest_running = true;
2075
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002076 for (t = trace_types; t; t = t->next) {
2077 if (strcmp(type->name, t->name) == 0) {
2078 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08002079 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002080 type->name);
2081 ret = -1;
2082 goto out;
2083 }
2084 }
2085
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01002086 if (!type->set_flag)
2087 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08002088 if (!type->flags) {
 2089 /* allocate a dummy tracer_flags */
2090 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08002091 if (!type->flags) {
2092 ret = -ENOMEM;
2093 goto out;
2094 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08002095 type->flags->val = 0;
2096 type->flags->opts = dummy_tracer_opt;
2097 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01002098 if (!type->flags->opts)
2099 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01002100
Chunyu Hud39cdd22016-03-08 21:37:01 +08002101 /* store the tracer for __set_tracer_option */
2102 type->flags->trace = type;
2103
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05002104 ret = run_tracer_selftest(type);
2105 if (ret < 0)
2106 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02002107
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002108 type->next = trace_types;
2109 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04002110 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02002111
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002112 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01002113 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002114 mutex_unlock(&trace_types_lock);
2115
Steven Rostedtdac74942009-02-05 01:13:38 -05002116 if (ret || !default_bootup_tracer)
2117 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05002118
Li Zefanee6c2c12009-09-18 14:06:47 +08002119 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05002120 goto out_unlock;
2121
2122 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2123 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05002124 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05002125 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08002126
2127 apply_trace_boot_options();
2128
Steven Rostedtdac74942009-02-05 01:13:38 -05002129 /* disable other selftests, since this will break it. */
Masami Hiramatsu60efe212020-12-08 17:54:09 +09002130 disable_tracing_selftest("running a tracer");
Steven Rostedtdac74942009-02-05 01:13:38 -05002131
2132 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002133 return ret;
2134}
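/*
 * Illustrative sketch, not part of this file: a minimal built-in
 * tracer registration, done from an initcall since register_tracer()
 * is __init. All names below are hypothetical.
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(init_my_tracer);
 */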
2135
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002136static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04002137{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002138 struct trace_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04002139
Hiraku Toyookaa5416412012-12-19 16:02:34 +09002140 if (!buffer)
2141 return;
2142
Steven Rostedtf6339032009-09-04 12:35:16 -04002143 ring_buffer_record_disable(buffer);
2144
2145 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08002146 synchronize_rcu();
Steven Rostedt68179682012-05-08 20:57:53 -04002147 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04002148
2149 ring_buffer_record_enable(buffer);
2150}
2151
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002152void tracing_reset_online_cpus(struct array_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02002153{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002154 struct trace_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02002155
Hiraku Toyookaa5416412012-12-19 16:02:34 +09002156 if (!buffer)
2157 return;
2158
Steven Rostedt621968c2009-09-04 12:02:35 -04002159 ring_buffer_record_disable(buffer);
2160
2161 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08002162 synchronize_rcu();
Steven Rostedt621968c2009-09-04 12:02:35 -04002163
Alexander Z Lam94571582013-08-02 18:36:16 -07002164 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02002165
Nicholas Pigginb23d7a5f2020-06-25 15:34:03 +10002166 ring_buffer_reset_online_cpus(buffer);
Steven Rostedt621968c2009-09-04 12:02:35 -04002167
2168 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02002169}
2170
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04002171/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002172void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04002173{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002174 struct trace_array *tr;
2175
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002176 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (VMware)065e63f2017-08-31 17:03:47 -04002177 if (!tr->clear_trace)
2178 continue;
2179 tr->clear_trace = false;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002180 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002181#ifdef CONFIG_TRACER_MAX_TRACE
2182 tracing_reset_online_cpus(&tr->max_buffer);
2183#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002184 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04002185}
2186
Joel Fernandesd914ba32017-06-26 19:01:55 -07002187static int *tgid_map;
2188
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002189#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002190#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01002191static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002192struct saved_cmdlines_buffer {
2193 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2194 unsigned *map_cmdline_to_pid;
2195 unsigned cmdline_num;
2196 int cmdline_idx;
2197 char *saved_cmdlines;
2198};
2199static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02002200
Steven Rostedt25b0b442008-05-12 21:21:00 +02002201/* temporary disable recording */
Joel Fernandesd914ba32017-06-26 19:01:55 -07002202static atomic_t trace_record_taskinfo_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002203
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002204static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002205{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002206 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2207}
2208
2209static inline void set_cmdline(int idx, const char *cmdline)
2210{
Tom Zanussi85f726a2019-03-05 10:12:00 -06002211 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002212}
2213
2214static int allocate_cmdlines_buffer(unsigned int val,
2215 struct saved_cmdlines_buffer *s)
2216{
Kees Cook6da2ec52018-06-12 13:55:00 -07002217 s->map_cmdline_to_pid = kmalloc_array(val,
2218 sizeof(*s->map_cmdline_to_pid),
2219 GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002220 if (!s->map_cmdline_to_pid)
2221 return -ENOMEM;
2222
Kees Cook6da2ec52018-06-12 13:55:00 -07002223 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002224 if (!s->saved_cmdlines) {
2225 kfree(s->map_cmdline_to_pid);
2226 return -ENOMEM;
2227 }
2228
2229 s->cmdline_idx = 0;
2230 s->cmdline_num = val;
2231 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2232 sizeof(s->map_pid_to_cmdline));
2233 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2234 val * sizeof(*s->map_cmdline_to_pid));
2235
2236 return 0;
2237}
2238
2239static int trace_create_savedcmd(void)
2240{
2241 int ret;
2242
Namhyung Kima6af8fb2014-06-10 16:11:35 +09002243 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002244 if (!savedcmd)
2245 return -ENOMEM;
2246
2247 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2248 if (ret < 0) {
2249 kfree(savedcmd);
2250 savedcmd = NULL;
2251 return -ENOMEM;
2252 }
2253
2254 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002255}
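/*
 * Layout note: map_pid_to_cmdline[] is indexed by the low bits of the
 * pid (pid & (PID_MAX_DEFAULT - 1), see trace_save_cmdline() below),
 * so two pids that collide share a slot and the most recent writer
 * wins. map_cmdline_to_pid[] records which pid currently owns each
 * saved comm, letting __trace_find_cmdline() detect a stale slot and
 * fall back to "<...>".
 */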
2256
Carsten Emdeb5130b12009-09-13 01:43:07 +02002257int is_tracing_stopped(void)
2258{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002259 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02002260}
2261
Steven Rostedt0f048702008-11-05 16:05:44 -05002262/**
2263 * tracing_start - quick start of the tracer
2264 *
2265 * If tracing is enabled but was stopped by tracing_stop,
2266 * this will start the tracer back up.
2267 */
2268void tracing_start(void)
2269{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002270 struct trace_buffer *buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002271 unsigned long flags;
2272
2273 if (tracing_disabled)
2274 return;
2275
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002276 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2277 if (--global_trace.stop_count) {
2278 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05002279 /* Someone screwed up their debugging */
2280 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002281 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05002282 }
Steven Rostedt0f048702008-11-05 16:05:44 -05002283 goto out;
2284 }
2285
Steven Rostedta2f80712010-03-12 19:56:00 -05002286 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002287 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05002288
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002289 buffer = global_trace.array_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002290 if (buffer)
2291 ring_buffer_record_enable(buffer);
2292
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002293#ifdef CONFIG_TRACER_MAX_TRACE
2294 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002295 if (buffer)
2296 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002297#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002298
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002299 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002300
Steven Rostedt0f048702008-11-05 16:05:44 -05002301 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002302 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2303}
2304
2305static void tracing_start_tr(struct trace_array *tr)
2306{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002307 struct trace_buffer *buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002308 unsigned long flags;
2309
2310 if (tracing_disabled)
2311 return;
2312
2313 /* If global, we need to also start the max tracer */
2314 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2315 return tracing_start();
2316
2317 raw_spin_lock_irqsave(&tr->start_lock, flags);
2318
2319 if (--tr->stop_count) {
2320 if (tr->stop_count < 0) {
2321 /* Someone screwed up their debugging */
2322 WARN_ON_ONCE(1);
2323 tr->stop_count = 0;
2324 }
2325 goto out;
2326 }
2327
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002328 buffer = tr->array_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002329 if (buffer)
2330 ring_buffer_record_enable(buffer);
2331
2332 out:
2333 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002334}
2335
2336/**
2337 * tracing_stop - quick stop of the tracer
2338 *
2339 * Light weight way to stop tracing. Use in conjunction with
2340 * tracing_start.
2341 */
2342void tracing_stop(void)
2343{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002344 struct trace_buffer *buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002345 unsigned long flags;
2346
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002347 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2348 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05002349 goto out;
2350
Steven Rostedta2f80712010-03-12 19:56:00 -05002351 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002352 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002353
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002354 buffer = global_trace.array_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002355 if (buffer)
2356 ring_buffer_record_disable(buffer);
2357
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002358#ifdef CONFIG_TRACER_MAX_TRACE
2359 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002360 if (buffer)
2361 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002362#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002363
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002364 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002365
Steven Rostedt0f048702008-11-05 16:05:44 -05002366 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002367 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2368}
2369
2370static void tracing_stop_tr(struct trace_array *tr)
2371{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002372 struct trace_buffer *buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002373 unsigned long flags;
2374
2375 /* If global, we need to also stop the max tracer */
2376 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2377 return tracing_stop();
2378
2379 raw_spin_lock_irqsave(&tr->start_lock, flags);
2380 if (tr->stop_count++)
2381 goto out;
2382
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002383 buffer = tr->array_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002384 if (buffer)
2385 ring_buffer_record_disable(buffer);
2386
2387 out:
2388 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002389}
2390
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002391static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002392{
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002393 unsigned tpid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002394
Joel Fernandeseaf260a2017-07-06 16:00:21 -07002395 /* treat recording of idle task as a success */
2396 if (!tsk->pid)
2397 return 1;
2398
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002399 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002400
2401 /*
2402 * It's not the end of the world if we don't get
2403 * the lock, but we also don't want to spin
2404 * nor do we want to disable interrupts,
2405 * so if we miss here, then better luck next time.
2406 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002407 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002408 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002409
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002410 idx = savedcmd->map_pid_to_cmdline[tpid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002411 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002412 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002413
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002414 savedcmd->map_pid_to_cmdline[tpid] = idx;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002415 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002416 }
2417
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002418 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002419 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002420
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002421 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002422
2423 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002424}
2425
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002426static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002427{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002428 unsigned map;
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002429 int tpid;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002430
Steven Rostedt4ca530852009-03-16 19:20:15 -04002431 if (!pid) {
2432 strcpy(comm, "<idle>");
2433 return;
2434 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002435
Steven Rostedt74bf4072010-01-25 15:11:53 -05002436 if (WARN_ON_ONCE(pid < 0)) {
2437 strcpy(comm, "<XXX>");
2438 return;
2439 }
2440
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002441 tpid = pid & (PID_MAX_DEFAULT - 1);
2442 map = savedcmd->map_pid_to_cmdline[tpid];
2443 if (map != NO_CMDLINE_MAP) {
2444 tpid = savedcmd->map_cmdline_to_pid[map];
2445 if (tpid == pid) {
2446 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2447 return;
2448 }
Steven Rostedt4ca530852009-03-16 19:20:15 -04002449 }
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002450 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002451}
2452
2453void trace_find_cmdline(int pid, char comm[])
2454{
2455 preempt_disable();
2456 arch_spin_lock(&trace_cmdline_lock);
2457
2458 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002459
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002460 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02002461 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002462}
2463
Joel Fernandesd914ba32017-06-26 19:01:55 -07002464int trace_find_tgid(int pid)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002465{
Joel Fernandesd914ba32017-06-26 19:01:55 -07002466 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2467 return 0;
2468
2469 return tgid_map[pid];
2470}
2471
2472static int trace_save_tgid(struct task_struct *tsk)
2473{
Joel Fernandesbd45d342017-07-06 16:00:22 -07002474 /* treat recording of idle task as a success */
2475 if (!tsk->pid)
2476 return 1;
2477
2478 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
Joel Fernandesd914ba32017-06-26 19:01:55 -07002479 return 0;
2480
2481 tgid_map[tsk->pid] = tsk->tgid;
2482 return 1;
2483}
2484
2485static bool tracing_record_taskinfo_skip(int flags)
2486{
2487 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2488 return true;
2489 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2490 return true;
2491 if (!__this_cpu_read(trace_taskinfo_save))
2492 return true;
2493 return false;
2494}
2495
2496/**
2497 * tracing_record_taskinfo - record the task info of a task
2498 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002499 * @task: task to record
2500 * @flags: TRACE_RECORD_CMDLINE for recording comm
2501 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002502 */
2503void tracing_record_taskinfo(struct task_struct *task, int flags)
2504{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002505 bool done;
2506
Joel Fernandesd914ba32017-06-26 19:01:55 -07002507 if (tracing_record_taskinfo_skip(flags))
2508 return;
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002509
2510 /*
2511 * Record as much task information as possible. If some fail, continue
2512 * to try to record the others.
2513 */
2514 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2515 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2516
2517 /* If recording any information failed, retry again soon. */
2518 if (!done)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002519 return;
2520
Joel Fernandesd914ba32017-06-26 19:01:55 -07002521 __this_cpu_write(trace_taskinfo_save, false);
2522}
2523
2524/**
2525 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2526 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002527 * @prev: previous task during sched_switch
2528 * @next: next task during sched_switch
2529 * @flags: TRACE_RECORD_CMDLINE for recording comm
2530 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002531 */
2532void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2533 struct task_struct *next, int flags)
2534{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002535 bool done;
2536
Joel Fernandesd914ba32017-06-26 19:01:55 -07002537 if (tracing_record_taskinfo_skip(flags))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002538 return;
2539
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002540 /*
2541 * Record as much task information as possible. If some fail, continue
2542 * to try to record the others.
2543 */
2544 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2545 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2546 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2547 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
Joel Fernandesd914ba32017-06-26 19:01:55 -07002548
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002549 /* If recording any information failed, retry again soon. */
2550 if (!done)
Joel Fernandesd914ba32017-06-26 19:01:55 -07002551 return;
2552
2553 __this_cpu_write(trace_taskinfo_save, false);
2554}
2555
2556/* Helpers to record a specific task information */
2557void tracing_record_cmdline(struct task_struct *task)
2558{
2559 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2560}
2561
2562void tracing_record_tgid(struct task_struct *task)
2563{
2564 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002565}
2566
Steven Rostedt (VMware)af0009f2017-03-16 11:01:06 -04002567/*
2568 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2569 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2570 * simplifies those functions and keeps them in sync.
2571 */
2572enum print_line_t trace_handle_return(struct trace_seq *s)
2573{
2574 return trace_seq_has_overflowed(s) ?
2575 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2576}
2577EXPORT_SYMBOL_GPL(trace_handle_return);
2578
Sebastian Andrzej Siewior0c020062021-01-25 20:45:09 +01002579unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002580{
Sebastian Andrzej Siewior0c020062021-01-25 20:45:09 +01002581 unsigned int trace_flags = irqs_status;
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002582 unsigned int pc;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002583
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002584 pc = preempt_count();
2585
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002586 if (pc & NMI_MASK)
2587 trace_flags |= TRACE_FLAG_NMI;
2588 if (pc & HARDIRQ_MASK)
2589 trace_flags |= TRACE_FLAG_HARDIRQ;
Sebastian Andrzej Siewiorfe427882021-01-25 20:45:10 +01002590 if (in_serving_softirq())
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002591 trace_flags |= TRACE_FLAG_SOFTIRQ;
2592
2593 if (tif_need_resched())
2594 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2595 if (test_preempt_need_resched())
2596 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2597 return (trace_flags << 16) | (pc & 0xff);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002598}
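/*
 * Packing example: the returned context word carries the trace flags
 * in the upper 16 bits and the preempt count in the low byte. A hard
 * interrupt taken with one level of preempt-disable would yield
 * (TRACE_FLAG_HARDIRQ << 16) | 1, in addition to whatever irq-status
 * flags were passed in via irqs_status.
 */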

struct ring_buffer_event *
trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned int trace_ctx)
{
	return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
}

DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DEFINE_PER_CPU(int, trace_buffered_event_cnt);
static int trace_buffered_event_ref;

/**
 * trace_buffered_event_enable - enable buffering events
 *
 * When events are being filtered, it is quicker to use a temporary
 * buffer to write the event data into if there's a likely chance
 * that it will not be committed. The discard of the ring buffer
 * is not as fast as committing, and is much slower than copying
 * a commit.
 *
 * When an event is to be filtered, allocate per cpu buffers to
 * write the event data into, and if the event is filtered and discarded
 * it is simply dropped, otherwise, the entire data is to be committed
 * in one shot.
 */
void trace_buffered_event_enable(void)
{
	struct ring_buffer_event *event;
	struct page *page;
	int cpu;

	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));

	if (trace_buffered_event_ref++)
		return;

	for_each_tracing_cpu(cpu) {
		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_NORETRY, 0);
		if (!page)
			goto failed;

		event = page_address(page);
		memset(event, 0, sizeof(*event));

		per_cpu(trace_buffered_event, cpu) = event;

		preempt_disable();
		if (cpu == smp_processor_id() &&
		    __this_cpu_read(trace_buffered_event) !=
		    per_cpu(trace_buffered_event, cpu))
			WARN_ON_ONCE(1);
		preempt_enable();
	}

	return;
 failed:
	trace_buffered_event_disable();
}
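
/*
 * Usage sketch (illustrative only): callers pair enable/disable under
 * event_mutex, and the reference count lets several filters share the
 * per-CPU buffers.
 */
#if 0
	mutex_lock(&event_mutex);
	trace_buffered_event_enable();		/* first user allocates */
	/* ... install or remove the event filter here ... */
	trace_buffered_event_disable();		/* last user frees */
	mutex_unlock(&event_mutex);
#endif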

static void enable_trace_buffered_event(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
	this_cpu_dec(trace_buffered_event_cnt);
}

static void disable_trace_buffered_event(void *data)
{
	this_cpu_inc(trace_buffered_event_cnt);
}

/**
 * trace_buffered_event_disable - disable buffering events
 *
 * When a filter is removed, it is faster to not use the buffered
 * events, and to commit directly into the ring buffer. Free up
 * the temp buffers when there are no more users. This requires
 * special synchronization with current events.
 */
void trace_buffered_event_disable(void)
{
	int cpu;

	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));

	if (WARN_ON_ONCE(!trace_buffered_event_ref))
		return;

	if (--trace_buffered_event_ref)
		return;

	preempt_disable();
	/* For each CPU, set the buffer as used. */
	smp_call_function_many(tracing_buffer_mask,
			       disable_trace_buffered_event, NULL, 1);
	preempt_enable();

	/* Wait for all current users to finish */
	synchronize_rcu();

	for_each_tracing_cpu(cpu) {
		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
		per_cpu(trace_buffered_event, cpu) = NULL;
	}
	/*
	 * Make sure trace_buffered_event is NULL before clearing
	 * trace_buffered_event_cnt.
	 */
	smp_wmb();

	preempt_disable();
	/* Do the work on each cpu */
	smp_call_function_many(tracing_buffer_mask,
			       enable_trace_buffered_event, NULL, 1);
	preempt_enable();
}
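
/*
 * Teardown ordering of trace_buffered_event_disable(), summarized from
 * the code above:
 *
 *  1. IPI every CPU to bump trace_buffered_event_cnt, so new events
 *     stop picking up the per-CPU buffer.
 *  2. synchronize_rcu() waits out writers that already grabbed it.
 *  3. Free the pages and NULL the per-CPU pointers.
 *  4. smp_wmb() orders the NULL stores before step 5.
 *  5. IPI every CPU again to drop the count; writers then re-read the
 *     (now NULL) pointer in trace_event_buffer_lock_reserve().
 */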

static struct trace_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
			  struct trace_event_file *trace_file,
			  int type, unsigned long len,
			  unsigned int trace_ctx)
{
	struct ring_buffer_event *entry;
	struct trace_array *tr = trace_file->tr;
	int val;

	*current_rb = tr->array_buffer.buffer;

	if (!tr->no_filter_buffering_ref &&
	    (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
	    (entry = this_cpu_read(trace_buffered_event))) {
		/* Try to use the per cpu buffer first */
		val = this_cpu_inc_return(trace_buffered_event_cnt);
		if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
			trace_event_setup(entry, type, trace_ctx);
			entry->array[0] = len;
			return entry;
		}
		this_cpu_dec(trace_buffered_event_cnt);
	}

	entry = __trace_buffer_lock_reserve(*current_rb, type, len,
					    trace_ctx);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = __trace_buffer_lock_reserve(*current_rb, type, len,
						    trace_ctx);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
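
/*
 * Caller flow, sketched for a hypothetical event (the entry type and
 * field are made up; trace_event_buffer_reserve() wraps the reserve
 * above and trace_event_buffer_commit() completes it):
 */
#if 0
	struct trace_event_buffer fbuffer;
	struct trace_example_entry *entry;	/* hypothetical entry type */

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry));
	if (entry) {
		entry->value = 42;		/* hypothetical field */
		trace_event_buffer_commit(&fbuffer);
	}
#endif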

static DEFINE_SPINLOCK(tracepoint_iter_lock);
static DEFINE_MUTEX(tracepoint_printk_mutex);

static void output_printk(struct trace_event_buffer *fbuffer)
{
	struct trace_event_call *event_call;
	struct trace_event_file *file;
	struct trace_event *event;
	unsigned long flags;
	struct trace_iterator *iter = tracepoint_print_iter;

	/* We should never get here if iter is NULL */
	if (WARN_ON_ONCE(!iter))
		return;

	event_call = fbuffer->trace_file->event_call;
	if (!event_call || !event_call->event.funcs ||
	    !event_call->event.funcs->trace)
		return;

	file = fbuffer->trace_file;
	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	     !filter_match_preds(file->filter, fbuffer->entry)))
		return;

	event = &fbuffer->trace_file->event_call->event;

	spin_lock_irqsave(&tracepoint_iter_lock, flags);
	trace_seq_init(&iter->seq);
	iter->ent = fbuffer->entry;
	event_call->event.funcs->trace(iter, 0, event);
	trace_seq_putc(&iter->seq, 0);
	printk("%s", iter->seq.buffer);

	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int save_tracepoint_printk;
	int ret;

	mutex_lock(&tracepoint_printk_mutex);
	save_tracepoint_printk = tracepoint_printk;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	/*
	 * This will force exiting early, as tracepoint_printk
	 * is always zero when tracepoint_printk_iter is not allocated
	 */
	if (!tracepoint_print_iter)
		tracepoint_printk = 0;

	if (save_tracepoint_printk == tracepoint_printk)
		goto out;

	if (tracepoint_printk)
		static_key_enable(&tracepoint_printk_key.key);
	else
		static_key_disable(&tracepoint_printk_key.key);

 out:
	mutex_unlock(&tracepoint_printk_mutex);

	return ret;
}
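
/*
 * This sysctl is normally toggled from user space, e.g.:
 *
 *   # echo 1 > /proc/sys/kernel/tracepoint_printk
 *
 * which, when the tp_printk iterator was allocated at boot, routes
 * enabled trace events through printk() via output_printk() above.
 */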

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
	if (static_key_false(&tracepoint_printk_key.key))
		output_printk(fbuffer);

	if (static_branch_unlikely(&trace_event_exports_enabled))
		ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
	event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
					 fbuffer->event, fbuffer->entry,
					 fbuffer->trace_ctx, fbuffer->regs);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);

/*
 * Skip 3:
 *
 *   trace_buffer_unlock_commit_regs()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
# define STACK_SKIP 3

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned int trace_ctx,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	/*
	 * If regs is not set, then skip the necessary functions.
	 * Note, we can still get here via blktrace, wakeup tracer
	 * and mmiotrace, but that's ok if they lose a function or
	 * two. They are not that meaningful.
	 */
	ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
	ftrace_trace_userstack(tr, buffer, trace_ctx);
}

/*
 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
 */
void
trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
				   struct ring_buffer_event *event)
{
	__buffer_unlock_commit(buffer, event);
}

void
trace_function(struct trace_array *tr, unsigned long ip, unsigned long
	       parent_ip, unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_function;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					    trace_ctx);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event)) {
		if (static_branch_unlikely(&trace_function_exports_enabled))
			ftrace_exports(event, TRACE_EXPORT_FUNCTION);
		__buffer_unlock_commit(buffer, event);
	}
}

#ifdef CONFIG_STACKTRACE

/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
#define FTRACE_KSTACK_NESTING	4

#define FTRACE_KSTACK_ENTRIES	(PAGE_SIZE / FTRACE_KSTACK_NESTING)

struct ftrace_stack {
	unsigned long		calls[FTRACE_KSTACK_ENTRIES];
};


struct ftrace_stacks {
	struct ftrace_stack	stacks[FTRACE_KSTACK_NESTING];
};

static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned int trace_ctx,
				 int skip, struct pt_regs *regs)
{
	struct trace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	unsigned int size, nr_entries;
	struct ftrace_stack *fstack;
	struct stack_entry *entry;
	int stackidx;

	/*
	 * Add one, for this function and the call to save_stack_trace()
	 * If regs is set, then these functions will not be in the way.
	 */
#ifndef CONFIG_UNWINDER_ORC
	if (!regs)
		skip++;
#endif

	preempt_disable_notrace();

	stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;

	/* This should never happen. If it does, yell once and skip */
	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
		goto out;

	/*
	 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
	 * interrupt will either see the value pre increment or post
	 * increment. If the interrupt happens pre increment it will have
	 * restored the counter when it returns.  We just need a barrier to
	 * keep gcc from moving things around.
	 */
	barrier();

	fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
	size = ARRAY_SIZE(fstack->calls);

	if (regs) {
		nr_entries = stack_trace_save_regs(regs, fstack->calls,
						   size, skip);
	} else {
		nr_entries = stack_trace_save(fstack->calls, size, skip);
	}

	size = nr_entries * sizeof(unsigned long);
	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
					    (sizeof(*entry) - sizeof(entry->caller)) + size,
					    trace_ctx);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memcpy(&entry->caller, fstack->calls, size);
	entry->size = nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();

}

static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned int trace_ctx,
				      int skip, struct pt_regs *regs)
{
	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, trace_ctx, skip, regs);
}

void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
		   int skip)
{
	struct trace_buffer *buffer = tr->array_buffer.buffer;

	if (rcu_is_watching()) {
		__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
		return;
	}

	/*
	 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
	 * but if the above rcu_is_watching() failed, then the NMI
	 * triggered someplace critical, and rcu_irq_enter() should
	 * not be called from NMI.
	 */
	if (unlikely(in_nmi()))
		return;

	rcu_irq_enter_irqson();
	__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
	rcu_irq_exit_irqson();
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	if (tracing_disabled || tracing_selftest_running)
		return;

#ifndef CONFIG_UNWINDER_ORC
	/* Skip 1 to skip this function. */
	skip++;
#endif
	__ftrace_trace_stack(global_trace.array_buffer.buffer,
			     tracing_gen_ctx(), skip, NULL);
}
EXPORT_SYMBOL_GPL(trace_dump_stack);
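
/*
 * Illustrative call site (hypothetical, e.g. while debugging a driver):
 * record who reached this point in the trace buffer instead of dumping
 * it to the console.
 */
#if 0
	if (suspicious_condition)	/* hypothetical condition */
		trace_dump_stack(0);	/* 0: skip no extra frames */
#endif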

#ifdef CONFIG_USER_STACKTRACE_SUPPORT
static DEFINE_PER_CPU(int, user_stack_count);

static void
ftrace_trace_userstack(struct trace_array *tr,
		       struct trace_buffer *buffer, unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;

	if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * Saving the user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * Prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					    sizeof(*entry), trace_ctx);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}
#else /* CONFIG_USER_STACKTRACE_SUPPORT */
static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
				   unsigned int trace_ctx)
{
}
#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */

#endif /* CONFIG_STACKTRACE */

static inline void
func_repeats_set_delta_ts(struct func_repeats_entry *entry,
			  unsigned long long delta)
{
	entry->bottom_delta_ts = delta & U32_MAX;
	entry->top_delta_ts = (delta >> 32);
}
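
/*
 * A reader reassembles the 64-bit delta from the two 32-bit halves;
 * a minimal sketch (no such helper exists in this file):
 */
#if 0
static inline u64 func_repeats_get_delta_ts(struct func_repeats_entry *entry)
{
	return ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
}
#endif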

void trace_last_func_repeats(struct trace_array *tr,
			     struct trace_func_repeats *last_info,
			     unsigned int trace_ctx)
{
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct func_repeats_entry *entry;
	struct ring_buffer_event *event;
	u64 delta;

	event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
					    sizeof(*entry), trace_ctx);
	if (!event)
		return;

	delta = ring_buffer_event_time_stamp(buffer, event) -
		last_info->ts_last_call;

	entry = ring_buffer_event_data(event);
	entry->ip = last_info->ip;
	entry->parent_ip = last_info->parent_ip;
	entry->count = last_info->count;
	func_repeats_set_delta_ts(entry, delta);

	__buffer_unlock_commit(buffer, event);
}

/* created for use with alloc_percpu */
struct trace_buffer_struct {
	int nesting;
	char buffer[4][TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;

/*
 * This allows for lockless recording. If we're nested too deeply, then
 * this returns NULL.
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);

	if (!buffer || buffer->nesting >= 4)
		return NULL;

	buffer->nesting++;

	/* Interrupts must see nesting incremented before we use the buffer */
	barrier();
	return &buffer->buffer[buffer->nesting - 1][0];
}

static void put_trace_buf(void)
{
	/* Don't let the decrement of nesting leak before this */
	barrier();
	this_cpu_dec(trace_percpu_buffer->nesting);
}

static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;

	if (trace_percpu_buffer)
		return 0;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
		return -ENOMEM;

	trace_percpu_buffer = buffers;
	return 0;
}

static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warn("\n");
	pr_warn("**********************************************************\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warn("** unsafe for production use.                           **\n");
	pr_warn("**                                                      **\n");
	pr_warn("** If you see this message and you are not debugging    **\n");
	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
	pr_warn("**                                                      **\n");
	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warn("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.array_buffer.buffer)
		tracing_start_cmdline_record();
}
EXPORT_SYMBOL_GPL(trace_printk_init_buffers);

void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}

/**
 * trace_vbprintk - write binary msg to tracing buffer
 * @ip: The address of the caller
 * @fmt: The string format to write to the buffer
 * @args: Arguments for @fmt
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned int trace_ctx;
	char *tbuffer;
	int len = 0, size;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	trace_ctx = tracing_gen_ctx();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out_put;

	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					    trace_ctx);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
	}

out:
	ring_buffer_nest_end(buffer);
out_put:
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
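
/*
 * trace_vbprintk() backs trace_printk() on its binary fast path; a
 * typical (illustrative) call site, where "count" is a hypothetical
 * local variable:
 */
#if 0
	trace_printk("cpu=%d count=%ld\n", smp_processor_id(), count);
#endif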

__printf(3, 0)
static int
__trace_array_vprintk(struct trace_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size;
	struct print_entry *entry;
	unsigned int trace_ctx;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	trace_ctx = tracing_gen_ctx();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out_nobuffer;
	}

	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);

	size = sizeof(*entry) + len + 1;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					    trace_ctx);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len + 1);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
	}

out:
	ring_buffer_nest_end(buffer);
	put_trace_buf();

out_nobuffer:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}

__printf(3, 0)
int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
}

/**
 * trace_array_printk - Print a message to a specific instance
 * @tr: The instance trace_array descriptor
 * @ip: The instruction pointer that this is called from.
 * @fmt: The format to print (printf format)
 *
 * If a subsystem sets up its own instance, it has the right to
 * printk strings into its tracing instance buffer using this
 * function. Note, this function will not write into the top level
 * buffer (use trace_printk() for that), as writing into the top level
 * buffer should only have events that can be individually disabled.
 * trace_printk() is only used for debugging a kernel, and should never
 * be incorporated in normal use.
 *
 * trace_array_printk() can be used, as it will not add noise to the
 * top level tracing buffer.
 *
 * Note, trace_array_init_printk() must be called on @tr before this
 * can be used.
 */
__printf(3, 0)
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!tr)
		return -ENOENT;

	/* This is only allowed for created instances */
	if (tr == &global_trace)
		return 0;

	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}
EXPORT_SYMBOL_GPL(trace_array_printk);

/**
 * trace_array_init_printk - Initialize buffers for trace_array_printk()
 * @tr: The trace array to initialize the buffers for
 *
 * As trace_array_printk() only writes into instances, they are OK to
 * have in the kernel (unlike trace_printk()). This needs to be called
 * before trace_array_printk() can be used on a trace_array.
 */
int trace_array_init_printk(struct trace_array *tr)
{
	if (!tr)
		return -ENOENT;

	/* This is only allowed for created instances */
	if (tr == &global_trace)
		return -EINVAL;

	return alloc_percpu_trace_buffer();
}
EXPORT_SYMBOL_GPL(trace_array_init_printk);
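
/*
 * Putting the two together (illustrative sketch; "my_subsys" and
 * "state" are made up, and trace_array_get_by_name() is assumed to
 * provide the subsystem's instance):
 */
#if 0
	struct trace_array *tr;

	tr = trace_array_get_by_name("my_subsys");
	if (tr && !trace_array_init_printk(tr))
		trace_array_printk(tr, _THIS_IP_, "state=%d\n", state);
#endif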

__printf(3, 4)
int trace_array_printk_buf(struct trace_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
	va_end(ap);
	return ret;
}

__printf(2, 0)
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);

static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_iter_advance(buf_iter);
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter) {
		event = ring_buffer_iter_peek(buf_iter, ts);
		if (lost_events)
			*lost_events = ring_buffer_iter_dropped(buf_iter) ?
				(unsigned long)-1 : 0;
	} else {
		event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
					 lost_events);
	}

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct trace_buffer *buffer = iter->array_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother iterating
	 * over all cpus; peek at that cpu directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}

#define STATIC_FMT_BUF_SIZE	128
static char static_fmt_buf[STATIC_FMT_BUF_SIZE];

static char *trace_iter_expand_format(struct trace_iterator *iter)
{
	char *tmp;

	/*
	 * iter->tr is NULL when used with tp_printk, which makes
	 * this get called where it is not safe to call krealloc().
	 */
	if (!iter->tr || iter->fmt == static_fmt_buf)
		return NULL;

	tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
		       GFP_KERNEL);
	if (tmp) {
		iter->fmt_size += STATIC_FMT_BUF_SIZE;
		iter->fmt = tmp;
	}

	return tmp;
}

/* Returns true if the string is safe to dereference from an event */
static bool trace_safe_str(struct trace_iterator *iter, const char *str)
{
	unsigned long addr = (unsigned long)str;
	struct trace_event *trace_event;
	struct trace_event_call *event;

	/* OK if part of the event data */
	if ((addr >= (unsigned long)iter->ent) &&
	    (addr < (unsigned long)iter->ent + iter->ent_size))
		return true;

	/* OK if part of the temp seq buffer */
	if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
	    (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
		return true;

	/* Core rodata can not be freed */
	if (is_kernel_rodata(addr))
		return true;

	if (trace_is_tracepoint_string(str))
		return true;

	/*
	 * Now this could be a module event, referencing core module
	 * data, which is OK.
	 */
	if (!iter->ent)
		return false;

	trace_event = ftrace_find_event(iter->ent->type);
	if (!trace_event)
		return false;

	event = container_of(trace_event, struct trace_event_call, event);
	if (!event->mod)
		return false;

	/* Would rather have rodata, but this will suffice */
	if (within_module_core(addr, event->mod))
		return true;

	return false;
}

static const char *show_buffer(struct trace_seq *s)
{
	struct seq_buf *seq = &s->seq;

	seq_buf_terminate(seq);

	return seq->buffer;
}

static DEFINE_STATIC_KEY_FALSE(trace_no_verify);

static int test_can_verify_check(const char *fmt, ...)
{
	char buf[16];
	va_list ap;
	int ret;

	/*
	 * The verifier depends on vsnprintf() modifying the va_list
	 * passed to it, where it is sent as a reference. Some architectures
	 * (like x86_32) pass it by value, which means that vsnprintf()
	 * does not modify the va_list passed to it, and the verifier
	 * would then need to be able to understand all the values that
	 * vsnprintf can use. If it is passed by value, then the verifier
	 * is disabled.
	 */
	va_start(ap, fmt);
	vsnprintf(buf, 16, "%d", ap);
	ret = va_arg(ap, int);
	va_end(ap);

	return ret;
}

static void test_can_verify(void)
{
	if (!test_can_verify_check("%d %d", 0, 1)) {
		pr_info("trace event string verifier disabled\n");
		static_branch_inc(&trace_no_verify);
	}
}

/**
 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
 * @iter: The iterator that holds the seq buffer and the event being printed
 * @fmt: The format used to print the event
 * @ap: The va_list holding the data to print from @fmt.
 *
 * This writes the data into the @iter->seq buffer using the data from
 * @fmt and @ap. If the format has a %s, then the source of the string
 * is examined to make sure it is safe to print, otherwise it will
 * warn and print "[UNSAFE MEMORY]" in place of the dereferenced string
 * pointer.
 */
void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
			 va_list ap)
{
	const char *p = fmt;
	const char *str;
	int i, j;

	if (WARN_ON_ONCE(!fmt))
		return;

	if (static_branch_unlikely(&trace_no_verify))
		goto print;

	/* Don't bother checking when doing a ftrace_dump() */
	if (iter->fmt == static_fmt_buf)
		goto print;

	while (*p) {
		bool star = false;
		int len = 0;

		j = 0;

		/* We only care about %s and variants */
		for (i = 0; p[i]; i++) {
			if (i + 1 >= iter->fmt_size) {
				/*
				 * If we can't expand the copy buffer,
				 * just print it.
				 */
				if (!trace_iter_expand_format(iter))
					goto print;
			}

			if (p[i] == '\\' && p[i+1]) {
				i++;
				continue;
			}
			if (p[i] == '%') {
				/* Need to test cases like %08.*s */
				for (j = 1; p[i+j]; j++) {
					if (isdigit(p[i+j]) ||
					    p[i+j] == '.')
						continue;
					if (p[i+j] == '*') {
						star = true;
						continue;
					}
					break;
				}
				if (p[i+j] == 's')
					break;
				star = false;
			}
			j = 0;
		}
		/* If no %s found then just print normally */
		if (!p[i])
			break;

		/* Copy up to the %s, and print that */
		strncpy(iter->fmt, p, i);
		iter->fmt[i] = '\0';
		trace_seq_vprintf(&iter->seq, iter->fmt, ap);

		if (star)
			len = va_arg(ap, int);

		/* The ap now points to the string data of the %s */
		str = va_arg(ap, const char *);

		/*
		 * If you hit this warning, it is likely that the
		 * trace event in question used %s on a string that
		 * was saved at the time of the event, but may not be
		 * around when the trace is read. Use __string(),
		 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
		 * instead. See samples/trace_events/trace-events-sample.h
		 * for reference.
		 */
		if (WARN_ONCE(!trace_safe_str(iter, str),
			      "fmt: '%s' current_buffer: '%s'",
			      fmt, show_buffer(&iter->seq))) {
			int ret;

			/* Try to safely read the string */
			if (star) {
				if (len + 1 > iter->fmt_size)
					len = iter->fmt_size - 1;
				if (len < 0)
					len = 0;
				ret = copy_from_kernel_nofault(iter->fmt, str, len);
				iter->fmt[len] = 0;
				star = false;
			} else {
				ret = strncpy_from_kernel_nofault(iter->fmt, str,
								  iter->fmt_size);
			}
			if (ret < 0)
				trace_seq_printf(&iter->seq, "(0x%px)", str);
			else
				trace_seq_printf(&iter->seq, "(0x%px:%s)",
						 str, iter->fmt);
			str = "[UNSAFE-MEMORY]";
			strcpy(iter->fmt, "%s");
		} else {
			strncpy(iter->fmt, p + i, j + 1);
			iter->fmt[j+1] = '\0';
		}
		if (star)
			trace_seq_printf(&iter->seq, iter->fmt, len, str);
		else
			trace_seq_printf(&iter->seq, iter->fmt, str);

		p += i + j + 1;
	}
 print:
	if (*p)
		trace_seq_vprintf(&iter->seq, p, ap);
}
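
/*
 * The safe pattern that the warning above points at, sketched for a
 * hypothetical event (see samples/trace_events/trace-events-sample.h
 * for the real reference):
 */
#if 0
TRACE_EVENT(example_open,
	TP_PROTO(const char *filename),
	TP_ARGS(filename),
	TP_STRUCT__entry(
		__string(name, filename)	/* copied into the event */
	),
	TP_fast_assign(
		__assign_str(name, filename);
	),
	TP_printk("name=%s", __get_str(name))	/* safe at read time */
);
#endif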
3809
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09003810const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3811{
3812 const char *p, *new_fmt;
3813 char *q;
3814
3815 if (WARN_ON_ONCE(!fmt))
3816 return fmt;
3817
Steven Rostedt (VMware)0e1e71d2021-04-19 14:23:12 -04003818 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
Masami Hiramatsua345a672020-10-15 23:55:25 +09003819 return fmt;
3820
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09003821 p = fmt;
3822 new_fmt = q = iter->fmt;
3823 while (*p) {
3824 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3825 if (!trace_iter_expand_format(iter))
3826 return fmt;
3827
3828 q += iter->fmt - new_fmt;
3829 new_fmt = iter->fmt;
3830 }
3831
3832 *q++ = *p++;
3833
3834 /* Replace %p with %px */
3835 if (p[-1] == '%') {
3836 if (p[0] == '%') {
3837 *q++ = *p++;
3838 } else if (p[0] == 'p' && !isalnum(p[1])) {
3839 *q++ = *p++;
3840 *q++ = 'x';
3841 }
3842 }
3843 }
3844 *q = '\0';
3845
3846 return new_fmt;
3847}
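/*
 * Example of the rewrite above (illustrative values): with the
 * hash-ptr trace flag cleared, a format string such as
 *
 *	"comm=%s ptr=%p pct=100%%"
 *
 * comes back from trace_event_format() as
 *
 *	"comm=%s ptr=%px pct=100%%"
 *
 * "%%" is skipped over so it is never mistaken for a conversion, and
 * "%p" followed by an alphanumeric (e.g. "%pS") is left alone, since
 * only a bare "%p" is rewritten.
 */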
3848
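/*
 * Fallback for iter->temp when dumping from ftrace_dump(), where
 * kmalloc() cannot be used. The buffer is handed out as a
 * struct trace_entry, hence the explicit 4-byte alignment below.
 */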
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003849#define STATIC_TEMP_BUF_SIZE 128
Minchan Kim8fa655a2020-11-25 14:56:54 -08003850static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003851
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003852/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003853struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3854 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003855{
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003856 /* __find_next_entry will reset ent_size */
3857 int ent_size = iter->ent_size;
3858 struct trace_entry *entry;
3859
3860 /*
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003861 * If called from ftrace_dump(), then the iter->temp buffer
3862 * will be the static_temp_buf and not created from kmalloc.
3863	 * If the entry size is greater than the buffer, we cannot
3864	 * save it. Just return NULL in that case. This is only
3865	 * used to add markers when two consecutive events' time
3866	 * stamps have a large delta. See trace_print_lat_context().
3867 */
3868 if (iter->temp == static_temp_buf &&
3869 STATIC_TEMP_BUF_SIZE < ent_size)
3870 return NULL;
3871
3872 /*
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003873	 * __find_next_entry() may call peek_next_entry(), which may call
3874	 * ring_buffer_peek(), which can leave the contents of iter->ent
3875	 * undefined. Need to copy iter->ent now.
3876 */
3877 if (iter->ent && iter->ent != iter->temp) {
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003878 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3879 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
Steven Rostedt (VMware)851e6f62020-09-29 12:27:23 -04003880 void *temp;
3881 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3882 if (!temp)
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003883 return NULL;
Steven Rostedt (VMware)851e6f62020-09-29 12:27:23 -04003884 kfree(iter->temp);
3885 iter->temp = temp;
3886 iter->temp_size = iter->ent_size;
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003887 }
3888 memcpy(iter->temp, iter->ent, iter->ent_size);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003889 iter->ent = iter->temp;
3890 }
3891 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3892 /* Put back the original ent_size */
3893 iter->ent_size = ent_size;
3894
3895 return entry;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003896}
Ingo Molnar8c523a92008-05-12 21:20:46 +02003897
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003898/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05003899void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003900{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003901 iter->ent = __find_next_entry(iter, &iter->cpu,
3902 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003903
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003904 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003905 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003906
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003907 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003908}
3909
Ingo Molnare309b412008-05-12 21:20:51 +02003910static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003911{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003912 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003913 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003914}
3915
Ingo Molnare309b412008-05-12 21:20:51 +02003916static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003917{
3918 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003919 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02003920 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003921
Steven Rostedta63ce5b2009-12-07 09:11:39 -05003922 WARN_ON_ONCE(iter->leftover);
3923
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003924 (*pos)++;
3925
3926 /* can't go backwards */
3927 if (iter->idx > i)
3928 return NULL;
3929
3930 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05003931 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003932 else
3933 ent = iter;
3934
3935 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05003936 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003937
3938 iter->pos = *pos;
3939
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003940 return ent;
3941}
3942
Jason Wessel955b61e2010-08-05 09:22:23 -05003943void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003944{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003945 struct ring_buffer_iter *buf_iter;
3946 unsigned long entries = 0;
3947 u64 ts;
3948
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003949 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003950
Steven Rostedt6d158a82012-06-27 20:46:14 -04003951 buf_iter = trace_buffer_iter(iter, cpu);
3952 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003953 return;
3954
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003955 ring_buffer_iter_reset(buf_iter);
3956
3957 /*
3958	 * With the max latency tracers, a reset may never have taken
3959	 * place on a cpu. This is evidenced by the timestamp being
3960	 * before the start of the buffer.
3961 */
YangHui69243722020-06-16 11:36:46 +08003962 while (ring_buffer_iter_peek(buf_iter, &ts)) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003963 if (ts >= iter->array_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003964 break;
3965 entries++;
Steven Rostedt (VMware)bc1a72a2020-03-17 17:32:25 -04003966 ring_buffer_iter_advance(buf_iter);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003967 }
3968
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003969 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003970}
3971
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003972/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003973	 * The current tracer is copied to avoid using a global
3974	 * lock all around.
3975 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003976static void *s_start(struct seq_file *m, loff_t *pos)
3977{
3978 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003979 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003980 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003981 void *p = NULL;
3982 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003983 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003984
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09003985 /*
3986 * copy the tracer to avoid using a global lock all around.
3987 * iter->trace is a copy of current_trace, the pointer to the
3988 * name may be used instead of a strcmp(), as iter->trace->name
3989 * will point to the same string as current_trace->name.
3990 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003991 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003992 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3993 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003994 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003995
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003996#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003997 if (iter->snapshot && iter->trace->use_max_tr)
3998 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003999#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004000
4001 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07004002 atomic_inc(&trace_record_taskinfo_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004003
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004004 if (*pos != iter->pos) {
4005 iter->ent = NULL;
4006 iter->cpu = 0;
4007 iter->idx = -1;
4008
Steven Rostedtae3b5092013-01-23 15:22:59 -05004009 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004010 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004011 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004012 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004013 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004014
Lai Jiangshanac91d852010-03-02 17:54:50 +08004015 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004016 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4017 ;
4018
4019 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004020 /*
4021 * If we overflowed the seq_file before, then we want
4022	 * to just reuse the trace_seq buffer.
4023 */
4024 if (iter->leftover)
4025 p = iter;
4026 else {
4027 l = *pos - 1;
4028 p = s_next(m, p, &l);
4029 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004030 }
4031
Lai Jiangshan4f535962009-05-18 19:35:34 +08004032 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004033 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004034 return p;
4035}
4036
4037static void s_stop(struct seq_file *m, void *p)
4038{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004039 struct trace_iterator *iter = m->private;
4040
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004041#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004042 if (iter->snapshot && iter->trace->use_max_tr)
4043 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004044#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004045
4046 if (!iter->snapshot)
Joel Fernandesd914ba32017-06-26 19:01:55 -07004047 atomic_dec(&trace_record_taskinfo_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004048
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004049 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004050 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004051}
4052
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004053static void
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004054get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004055 unsigned long *entries, int cpu)
4056{
4057 unsigned long count;
4058
4059 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4060 /*
4061 * If this buffer has skipped entries, then we hold all
4062 * entries for the trace and we need to ignore the
4063 * ones before the time stamp.
4064 */
4065 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4066 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4067 /* total is the same as the entries */
4068 *total = count;
4069 } else
4070 *total = count +
4071 ring_buffer_overrun_cpu(buf->buffer, cpu);
4072 *entries = count;
4073}
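/*
 * Note: "entries" is what is currently readable from the buffer, while
 * "total" also counts events lost to ring buffer overruns. In the
 * skipped-entries case above both counts are the same, since the
 * skipped events already account for everything before the time stamp.
 */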
4074
4075static void
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004076get_total_entries(struct array_buffer *buf,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004077 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004078{
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004079 unsigned long t, e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004080 int cpu;
4081
4082 *total = 0;
4083 *entries = 0;
4084
4085 for_each_tracing_cpu(cpu) {
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004086 get_total_entries_cpu(buf, &t, &e, cpu);
4087 *total += t;
4088 *entries += e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004089 }
4090}
4091
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004092unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4093{
4094 unsigned long total, entries;
4095
4096 if (!tr)
4097 tr = &global_trace;
4098
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004099 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004100
4101 return entries;
4102}
4103
4104unsigned long trace_total_entries(struct trace_array *tr)
4105{
4106 unsigned long total, entries;
4107
4108 if (!tr)
4109 tr = &global_trace;
4110
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004111 get_total_entries(&tr->array_buffer, &total, &entries);
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004112
4113 return entries;
4114}
4115
Ingo Molnare309b412008-05-12 21:20:51 +02004116static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004117{
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02004118 seq_puts(m, "# _------=> CPU# \n"
4119 "# / _-----=> irqs-off \n"
4120 "# | / _----=> need-resched \n"
4121 "# || / _---=> hardirq/softirq \n"
4122 "# ||| / _--=> preempt-depth \n"
4123 "# |||| / delay \n"
4124 "# cmd pid ||||| time | caller \n"
4125 "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004126}
4127
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004128static void print_event_info(struct array_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004129{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004130 unsigned long total;
4131 unsigned long entries;
4132
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004133 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004134 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4135 entries, total, num_online_cpus());
4136 seq_puts(m, "#\n");
4137}
4138
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004139static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
Joel Fernandes441dae82017-06-25 22:38:43 -07004140 unsigned int flags)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004141{
Joel Fernandes441dae82017-06-25 22:38:43 -07004142 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4143
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004144 print_event_info(buf, m);
Joel Fernandes441dae82017-06-25 22:38:43 -07004145
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02004146 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4147 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004148}
4149
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004150static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
Joel Fernandes441dae82017-06-25 22:38:43 -07004151 unsigned int flags)
Steven Rostedt77271ce2011-11-17 09:34:33 -05004152{
Joel Fernandes441dae82017-06-25 22:38:43 -07004153 bool tgid = flags & TRACE_ITER_RECORD_TGID;
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02004154 const char *space = " ";
4155 int prec = tgid ? 12 : 2;
Joel Fernandes441dae82017-06-25 22:38:43 -07004156
Quentin Perret9e738212019-02-14 15:29:50 +00004157 print_event_info(buf, m);
4158
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02004159 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
4160 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4161 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4162 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4163 seq_printf(m, "# %.*s||| / delay\n", prec, space);
4164 seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4165 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
Steven Rostedt77271ce2011-11-17 09:34:33 -05004166}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004167
Jiri Olsa62b915f2010-04-02 19:01:22 +02004168void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004169print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4170{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004171 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004172 struct array_buffer *buf = iter->array_buffer;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004173 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004174 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004175 unsigned long entries;
4176 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004177 const char *name = "preemption";
4178
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05004179 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004180
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004181 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004182
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004183 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004184 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004185 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004186 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004187 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004188 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02004189 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004190 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02004191 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004192 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004193#if defined(CONFIG_PREEMPT_NONE)
4194 "server",
4195#elif defined(CONFIG_PREEMPT_VOLUNTARY)
4196 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04004197#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004198 "preempt",
Sebastian Andrzej Siewior9c34fc42019-10-15 21:18:20 +02004199#elif defined(CONFIG_PREEMPT_RT)
4200 "preempt_rt",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004201#else
4202 "unknown",
4203#endif
4204 /* These are reserved for later use */
4205 0, 0, 0, 0);
4206#ifdef CONFIG_SMP
4207 seq_printf(m, " #P:%d)\n", num_online_cpus());
4208#else
4209 seq_puts(m, ")\n");
4210#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004211 seq_puts(m, "# -----------------\n");
4212 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004213 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07004214 data->comm, data->pid,
4215 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004216 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004217 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004218
4219 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004220 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02004221 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4222 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004223 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02004224 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4225 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04004226 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004227 }
4228
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004229 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004230}
4231
Steven Rostedta3097202008-11-07 22:36:02 -05004232static void test_cpu_buff_start(struct trace_iterator *iter)
4233{
4234 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004235 struct trace_array *tr = iter->tr;
Steven Rostedta3097202008-11-07 22:36:02 -05004236
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004237 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004238 return;
4239
4240 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4241 return;
4242
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07004243 if (cpumask_available(iter->started) &&
4244 cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05004245 return;
4246
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004247 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004248 return;
4249
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07004250 if (cpumask_available(iter->started))
Sasha Levin919cd972015-09-04 12:45:56 -04004251 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004252
4253 /* Don't print started cpu buffer for the first entry of the trace */
4254 if (iter->idx > 1)
4255 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4256 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05004257}
4258
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004259static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004260{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004261 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02004262 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004263 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02004264 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004265 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004266
Ingo Molnar4e3c3332008-05-12 21:20:45 +02004267 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004268
Steven Rostedta3097202008-11-07 22:36:02 -05004269 test_cpu_buff_start(iter);
4270
Steven Rostedtf633cef2008-12-23 23:24:13 -05004271 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004272
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004273 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004274 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4275 trace_print_lat_context(iter);
4276 else
4277 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004278 }
4279
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004280 if (trace_seq_has_overflowed(s))
4281 return TRACE_TYPE_PARTIAL_LINE;
4282
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02004283 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04004284 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02004285
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004286 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04004287
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004288 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004289}
4290
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004291static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004292{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004293 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004294 struct trace_seq *s = &iter->seq;
4295 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004296 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004297
4298 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004299
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004300 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004301 trace_seq_printf(s, "%d %d %llu ",
4302 entry->pid, iter->cpu, iter->ts);
4303
4304 if (trace_seq_has_overflowed(s))
4305 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004306
Steven Rostedtf633cef2008-12-23 23:24:13 -05004307 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02004308 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04004309 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02004310
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004311 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04004312
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004313 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004314}
4315
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004316static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004317{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004318 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004319 struct trace_seq *s = &iter->seq;
4320 unsigned char newline = '\n';
4321 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004322 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004323
4324 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004325
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004326 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004327 SEQ_PUT_HEX_FIELD(s, entry->pid);
4328 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4329 SEQ_PUT_HEX_FIELD(s, iter->ts);
4330 if (trace_seq_has_overflowed(s))
4331 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004332 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004333
Steven Rostedtf633cef2008-12-23 23:24:13 -05004334 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02004335 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04004336 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02004337 if (ret != TRACE_TYPE_HANDLED)
4338 return ret;
4339 }
Steven Rostedt7104f302008-10-01 10:52:51 -04004340
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004341 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004342
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004343 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004344}
4345
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004346static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004347{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004348 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004349 struct trace_seq *s = &iter->seq;
4350 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004351 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004352
4353 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004354
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004355 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004356 SEQ_PUT_FIELD(s, entry->pid);
4357 SEQ_PUT_FIELD(s, iter->cpu);
4358 SEQ_PUT_FIELD(s, iter->ts);
4359 if (trace_seq_has_overflowed(s))
4360 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004361 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004362
Steven Rostedtf633cef2008-12-23 23:24:13 -05004363 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04004364 return event ? event->funcs->binary(iter, 0, event) :
4365 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004366}
4367
Jiri Olsa62b915f2010-04-02 19:01:22 +02004368int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004369{
Steven Rostedt6d158a82012-06-27 20:46:14 -04004370 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004371 int cpu;
4372
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004373 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05004374 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004375 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04004376 buf_iter = trace_buffer_iter(iter, cpu);
4377 if (buf_iter) {
4378 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004379 return 0;
4380 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004381 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004382 return 0;
4383 }
4384 return 1;
4385 }
4386
Steven Rostedtab464282008-05-12 21:21:00 +02004387 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04004388 buf_iter = trace_buffer_iter(iter, cpu);
4389 if (buf_iter) {
4390 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04004391 return 0;
4392 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004393 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04004394 return 0;
4395 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004396 }
Steven Rostedtd7690412008-10-01 00:29:53 -04004397
Frederic Weisbecker797d3712008-09-30 18:13:45 +02004398 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004399}
4400
Lai Jiangshan4f535962009-05-18 19:35:34 +08004401/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05004402enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004403{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004404 struct trace_array *tr = iter->tr;
4405 unsigned long trace_flags = tr->trace_flags;
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004406 enum print_line_t ret;
4407
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004408 if (iter->lost_events) {
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04004409 if (iter->lost_events == (unsigned long)-1)
4410 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4411 iter->cpu);
4412 else
4413 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4414 iter->cpu, iter->lost_events);
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004415 if (trace_seq_has_overflowed(&iter->seq))
4416 return TRACE_TYPE_PARTIAL_LINE;
4417 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04004418
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004419 if (iter->trace && iter->trace->print_line) {
4420 ret = iter->trace->print_line(iter);
4421 if (ret != TRACE_TYPE_UNHANDLED)
4422 return ret;
4423 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02004424
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05004425 if (iter->ent->type == TRACE_BPUTS &&
4426 trace_flags & TRACE_ITER_PRINTK &&
4427 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4428 return trace_print_bputs_msg_only(iter);
4429
Frederic Weisbecker48ead022009-03-12 18:24:49 +01004430 if (iter->ent->type == TRACE_BPRINT &&
4431 trace_flags & TRACE_ITER_PRINTK &&
4432 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04004433 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01004434
Frederic Weisbecker66896a82008-12-13 20:18:13 +01004435 if (iter->ent->type == TRACE_PRINT &&
4436 trace_flags & TRACE_ITER_PRINTK &&
4437 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04004438 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01004439
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004440 if (trace_flags & TRACE_ITER_BIN)
4441 return print_bin_fmt(iter);
4442
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004443 if (trace_flags & TRACE_ITER_HEX)
4444 return print_hex_fmt(iter);
4445
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004446 if (trace_flags & TRACE_ITER_RAW)
4447 return print_raw_fmt(iter);
4448
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004449 return print_trace_fmt(iter);
4450}
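/*
 * Note the precedence above: a tracer's own print_line() callback wins,
 * then the printk-msg-only shortcuts, then the bin/hex/raw output
 * modes, and only then the default formatted output.
 */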
4451
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004452void trace_latency_header(struct seq_file *m)
4453{
4454 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004455 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004456
4457 /* print nothing if the buffers are empty */
4458 if (trace_empty(iter))
4459 return;
4460
4461 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4462 print_trace_header(m, iter);
4463
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004464 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004465 print_lat_help_header(m);
4466}
4467
Jiri Olsa62b915f2010-04-02 19:01:22 +02004468void trace_default_header(struct seq_file *m)
4469{
4470 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004471 struct trace_array *tr = iter->tr;
4472 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02004473
Jiri Olsaf56e7f82011-06-03 16:58:49 +02004474 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4475 return;
4476
Jiri Olsa62b915f2010-04-02 19:01:22 +02004477 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4478 /* print nothing if the buffers are empty */
4479 if (trace_empty(iter))
4480 return;
4481 print_trace_header(m, iter);
4482 if (!(trace_flags & TRACE_ITER_VERBOSE))
4483 print_lat_help_header(m);
4484 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05004485 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4486 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004487 print_func_help_header_irq(iter->array_buffer,
Joel Fernandes441dae82017-06-25 22:38:43 -07004488 m, trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05004489 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004490 print_func_help_header(iter->array_buffer, m,
Joel Fernandes441dae82017-06-25 22:38:43 -07004491 trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05004492 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02004493 }
4494}
4495
Steven Rostedte0a413f2011-09-29 21:26:16 -04004496static void test_ftrace_alive(struct seq_file *m)
4497{
4498 if (!ftrace_is_dead())
4499 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004500 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4501 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04004502}
4503
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004504#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004505static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004506{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004507 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4508 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4509 "# Takes a snapshot of the main buffer.\n"
4510 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4511		      "# (Doesn't have to be '2'; works with any number that\n"
4512 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004513}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004514
4515static void show_snapshot_percpu_help(struct seq_file *m)
4516{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004517 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004518#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004519 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4520 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004521#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004522 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4523 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004524#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004525 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4526		      "# (Doesn't have to be '2'; works with any number that\n"
4527 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004528}
4529
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004530static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4531{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004532 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004533 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004534 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004535 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004536
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004537 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004538 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4539 show_snapshot_main_help(m);
4540 else
4541 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004542}
4543#else
4544/* Should never be called */
4545static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4546#endif
4547
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004548static int s_show(struct seq_file *m, void *v)
4549{
4550 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004551 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004552
4553 if (iter->ent == NULL) {
4554 if (iter->tr) {
4555 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4556 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04004557 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004558 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004559 if (iter->snapshot && trace_empty(iter))
4560 print_snapshot_help(m, iter);
4561 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004562 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02004563 else
4564 trace_default_header(m);
4565
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004566 } else if (iter->leftover) {
4567 /*
4568 * If we filled the seq_file buffer earlier, we
4569 * want to just show it now.
4570 */
4571 ret = trace_print_seq(m, &iter->seq);
4572
4573 /* ret should this time be zero, but you never know */
4574 iter->leftover = ret;
4575
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004576 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004577 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004578 ret = trace_print_seq(m, &iter->seq);
4579 /*
4580 * If we overflow the seq_file buffer, then it will
4581 * ask us for this data again at start up.
4582 * Use that instead.
4583 * ret is 0 if seq_file write succeeded.
4584 * -1 otherwise.
4585 */
4586 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004587 }
4588
4589 return 0;
4590}
4591
Oleg Nesterov649e9c702013-07-23 17:25:54 +02004592/*
4593	 * Should be used after trace_array_get(); trace_types_lock
4594 * ensures that i_cdev was already initialized.
4595 */
4596static inline int tracing_get_cpu(struct inode *inode)
4597{
4598 if (inode->i_cdev) /* See trace_create_cpu_file() */
4599 return (long)inode->i_cdev - 1;
4600 return RING_BUFFER_ALL_CPUS;
4601}
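/*
 * Sketch of the matching encoder (see trace_create_cpu_file() elsewhere
 * in this file): the per-cpu files stash "cpu + 1" in i_cdev, so the
 * NULL default maps to RING_BUFFER_ALL_CPUS and cpu 0 stays
 * distinguishable. Roughly:
 *
 *	d_inode(dentry)->i_cdev = (void *)(long)(cpu + 1);
 *
 * tracing_get_cpu() above simply inverts that mapping.
 */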
4602
James Morris88e9d342009-09-22 16:43:43 -07004603static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004604 .start = s_start,
4605 .next = s_next,
4606 .stop = s_stop,
4607 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004608};
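/*
 * For reference: seq_read() drives these callbacks as s_start(),
 * s_show(), s_next(), s_show(), ..., s_stop(), and restarts at
 * s_start() with the saved position whenever the user buffer fills.
 * That is why s_start() must handle an arbitrary *pos and why s_show()
 * keeps the iter->leftover bookkeeping.
 */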
4609
Ingo Molnare309b412008-05-12 21:20:51 +02004610static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02004611__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004612{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004613 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004614 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02004615 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004616
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004617 if (tracing_disabled)
4618 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02004619
Jiri Olsa50e18b92012-04-25 10:23:39 +02004620 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004621 if (!iter)
4622 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004623
Gil Fruchter72917232015-06-09 10:32:35 +03004624 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
Steven Rostedt6d158a82012-06-27 20:46:14 -04004625 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004626 if (!iter->buffer_iter)
4627 goto release;
4628
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004629 /*
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004630 * trace_find_next_entry() may need to save off iter->ent.
4631 * It will place it into the iter->temp buffer. As most
4632	 * events are less than 128 bytes, allocate a buffer of that size.
4633 * If one is greater, then trace_find_next_entry() will
4634 * allocate a new buffer to adjust for the bigger iter->ent.
4635 * It's not critical if it fails to get allocated here.
4636 */
4637 iter->temp = kmalloc(128, GFP_KERNEL);
4638 if (iter->temp)
4639 iter->temp_size = 128;
4640
4641 /*
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09004642	 * trace_event_printf() may need to modify the given format
4643	 * string to replace %p with %px so that it shows the real
4644	 * address instead of a hashed value. However, that is only
4645	 * needed for event tracing; other tracers may not need it.
4646	 * Defer the allocation until it is needed.
4647 */
4648 iter->fmt = NULL;
4649 iter->fmt_size = 0;
4650
4651 /*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004652 * We make a copy of the current tracer to avoid concurrent
4653 * changes on it while we are reading.
4654 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004655 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004656 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004657 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004658 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004659
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004660 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004661
Li Zefan79f55992009-06-15 14:58:26 +08004662 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004663 goto fail;
4664
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004665 iter->tr = tr;
4666
4667#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004668 /* Currently only the top directory has a snapshot */
4669 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004670 iter->array_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004671 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004672#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004673 iter->array_buffer = &tr->array_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004674 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004675 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02004676 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004677 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004678
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004679	/* Notify the tracer early, before we stop tracing. */
Dan Carpenterb3f7a6c2014-11-22 21:30:12 +03004680 if (iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01004681 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004682
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004683 /* Annotate start of buffers if we had overruns */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004684 if (ring_buffer_overruns(iter->array_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004685 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4686
David Sharp8be07092012-11-13 12:18:22 -08004687 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004688 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004689 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4690
Steven Rostedt (VMware)06e0a542020-03-17 17:32:31 -04004691 /*
4692 * If pause-on-trace is enabled, then stop the trace while
4693 * dumping, unless this is the "snapshot" file
4694 */
4695 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004696 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004697
Steven Rostedtae3b5092013-01-23 15:22:59 -05004698 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004699 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004700 iter->buffer_iter[cpu] =
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004701 ring_buffer_read_prepare(iter->array_buffer->buffer,
Douglas Anderson31b265b2019-03-08 11:32:04 -08004702 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004703 }
4704 ring_buffer_read_prepare_sync();
4705 for_each_tracing_cpu(cpu) {
4706 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004707 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004708 }
4709 } else {
4710 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004711 iter->buffer_iter[cpu] =
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004712 ring_buffer_read_prepare(iter->array_buffer->buffer,
Douglas Anderson31b265b2019-03-08 11:32:04 -08004713 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004714 ring_buffer_read_prepare_sync();
4715 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004716 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004717 }
4718
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004719 mutex_unlock(&trace_types_lock);
4720
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004721 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004722
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004723 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004724 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004725 kfree(iter->trace);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004726 kfree(iter->temp);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004727 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004728release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02004729 seq_release_private(inode, file);
4730 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004731}
4732
4733int tracing_open_generic(struct inode *inode, struct file *filp)
4734{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004735 int ret;
4736
4737 ret = tracing_check_open_get_tr(NULL);
4738 if (ret)
4739 return ret;
Steven Rostedt60a11772008-05-12 21:20:44 +02004740
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004741 filp->private_data = inode->i_private;
4742 return 0;
4743}
4744
Geyslan G. Bem2e864212013-10-18 21:15:54 -03004745bool tracing_is_disabled(void)
4746{
4747	return tracing_disabled;
4748}
4749
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004750/*
4751 * Open and update trace_array ref count.
4752 * Must have the current trace_array passed to it.
4753 */
Steven Rostedt (VMware)aa07d712019-10-11 19:12:21 -04004754int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004755{
4756 struct trace_array *tr = inode->i_private;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004757 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004758
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004759 ret = tracing_check_open_get_tr(tr);
4760 if (ret)
4761 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004762
4763 filp->private_data = inode->i_private;
4764
4765 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004766}
4767
Hannes Eder4fd27352009-02-10 19:44:12 +01004768static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004769{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004770 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07004771 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004772 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004773 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004774
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004775 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004776 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004777 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004778 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004779
Oleg Nesterov6484c712013-07-23 17:26:10 +02004780 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004781 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004782 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05004783
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004784 for_each_tracing_cpu(cpu) {
4785 if (iter->buffer_iter[cpu])
4786 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4787 }
4788
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004789 if (iter->trace && iter->trace->close)
4790 iter->trace->close(iter);
4791
Steven Rostedt (VMware)06e0a542020-03-17 17:32:31 -04004792 if (!iter->snapshot && tr->stop_count)
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004793 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004794 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004795
4796 __trace_array_put(tr);
4797
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004798 mutex_unlock(&trace_types_lock);
4799
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004800 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004801 free_cpumask_var(iter->started);
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09004802 kfree(iter->fmt);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004803 kfree(iter->temp);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004804 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004805 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02004806 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004807
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004808 return 0;
4809}
4810
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004811static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4812{
4813 struct trace_array *tr = inode->i_private;
4814
4815 trace_array_put(tr);
4816 return 0;
4817}
4818
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004819static int tracing_single_release_tr(struct inode *inode, struct file *file)
4820{
4821 struct trace_array *tr = inode->i_private;
4822
4823 trace_array_put(tr);
4824
4825 return single_release(inode, file);
4826}
4827
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004828static int tracing_open(struct inode *inode, struct file *file)
4829{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004830 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004831 struct trace_iterator *iter;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004832 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004833
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004834 ret = tracing_check_open_get_tr(tr);
4835 if (ret)
4836 return ret;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004837
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004838 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02004839 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4840 int cpu = tracing_get_cpu(inode);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004841 struct array_buffer *trace_buf = &tr->array_buffer;
Bo Yan8dd33bc2017-09-18 10:03:35 -07004842
4843#ifdef CONFIG_TRACER_MAX_TRACE
4844 if (tr->current_trace->print_max)
4845 trace_buf = &tr->max_buffer;
4846#endif
Oleg Nesterov6484c712013-07-23 17:26:10 +02004847
4848 if (cpu == RING_BUFFER_ALL_CPUS)
Bo Yan8dd33bc2017-09-18 10:03:35 -07004849 tracing_reset_online_cpus(trace_buf);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004850 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04004851 tracing_reset_cpu(trace_buf, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004852 }
4853
4854 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004855 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004856 if (IS_ERR(iter))
4857 ret = PTR_ERR(iter);
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004858 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004859 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4860 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004861
4862 if (ret < 0)
4863 trace_array_put(tr);
4864
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004865 return ret;
4866}
4867
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004868/*
4869 * Some tracers are not suitable for instance buffers.
4870 * A tracer is always available for the global array (toplevel)
4871 * or if it explicitly states that it is.
4872 */
4873static bool
4874trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4875{
4876 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4877}
4878
4879/* Find the next tracer that this trace array may use */
4880static struct tracer *
4881get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4882{
4883 while (t && !trace_ok_for_array(t, tr))
4884 t = t->next;
4885
4886 return t;
4887}
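/*
 * Used by t_start()/t_next() below, so that reading available_tracers
 * inside an instance lists only tracers flagged with allow_instances
 * (plus everything for the top-level array).
 */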
4888
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_puts(m, t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret) {
		trace_array_put(tr);
		return ret;
	}

	m = file->private_data;
	m->private = tr;

	return 0;
}

static int show_traces_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return seq_release(inode, file);
}

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

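/*
 * lseek for trace files: if the file was opened for read there is a
 * seq_file attached, so defer to seq_lseek(); otherwise there is no
 * iterator state and the position is simply pinned at zero.
 */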
loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= show_traces_release,
};

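/*
 * "tracing_cpumask" read: format the instance's cpumask with the %*pb
 * bitmap specifier.  The first snprintf(NULL, 0, ...) is only a length
 * probe so the temporary buffer is sized exactly to the printed mask
 * plus the trailing newline.
 */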
static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	char *mask_str;
	int len;

	len = snprintf(NULL, 0, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
	mask_str = kmalloc(len, GFP_KERNEL);
	if (!mask_str)
		return -ENOMEM;

	len = snprintf(mask_str, len, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	if (len >= count) {
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);

out_err:
	kfree(mask_str);

	return count;
}

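/*
 * Apply a new tracing cpumask to @tr.  For every CPU whose bit flips,
 * the per-cpu "disabled" counter and the ring buffer's record-enable
 * state are updated as a pair, under tr->max_lock with IRQs off so the
 * update cannot race with a max-latency buffer swap.
 */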
int tracing_set_cpumask(struct trace_array *tr,
			cpumask_var_t tracing_cpumask_new)
{
	int cpu;

	if (!tr)
		return -EINVAL;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	return 0;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err;

	if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_free;

	err = tracing_set_cpumask(tr, tracing_cpumask_new);
	if (err)
		goto err_free;

	free_cpumask_var(tracing_cpumask_new);

	return count;

err_free:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};

static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (tr->trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

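/*
 * Two kinds of options show up in "trace_options": the global flags
 * named in trace_options[] (backed by tr->trace_flags) and options a
 * tracer publishes through its flags->opts table.  The helpers below
 * handle the tracer-specific kind.
 */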
static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tracer_flags->trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

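/*
 * Set or clear one global trace flag.  Besides flipping the bit, the
 * flags with side effects are handled here: record-cmd and record-tgid
 * toggle comm/tgid saving (allocating tgid_map on first use), the two
 * fork flags make events or the function tracer follow children of
 * traced pids, overwrite flips the ring buffer mode, and printk starts
 * or stops trace_printk() output.
 */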
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	if ((mask == TRACE_ITER_RECORD_TGID) ||
	    (mask == TRACE_ITER_RECORD_CMD))
		lockdep_assert_held(&event_mutex);

	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		tr->trace_flags |= mask;
	else
		tr->trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_RECORD_TGID) {
		if (!tgid_map)
			tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
					    sizeof(*tgid_map),
					    GFP_KERNEL);
		if (!tgid_map) {
			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
			return -ENOMEM;
		}

		trace_event_enable_tgid_record(enabled);
	}

	if (mask == TRACE_ITER_EVENT_FORK)
		trace_event_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_FUNC_FORK)
		ftrace_pid_follow_fork(tr, enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK) {
		trace_printk_start_stop_comm(enabled);
		trace_printk_control(enabled);
	}

	return 0;
}

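/*
 * Parse and apply one option token written to "trace_options".  A "no"
 * prefix clears the flag instead of setting it, as the mini-HOWTO
 * below describes; e.g. writing "noprint-parent" would clear the
 * print-parent flag (option name shown for illustration only).
 */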
int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret;
	size_t orig_len = strlen(option);
	int len;

	cmp = strstrip(option);

	len = str_has_prefix(cmp, "no");
	if (len)
		neg = 1;

	cmp += len;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = match_string(trace_options, -1, cmp);
	/* If no option could be set, test the specific tracer options */
	if (ret < 0)
		ret = set_tracer_option(tr, cmp, neg);
	else
		ret = set_tracer_flag(tr, 1 << ret, !neg);

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	/*
	 * If the first trailing whitespace is replaced with '\0' by strstrip,
	 * turn it back into a space.
	 */
	if (orig_len > strlen(option))
		option[strlen(option)] = ' ';

	return ret;
}

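/*
 * Apply the comma-separated option list captured from the kernel
 * command line (presumably via a trace_options= boot parameter that
 * filled trace_boot_options_buf).  strsep() NUL-terminates each token,
 * so the comma is put back afterwards to keep the buffer reusable.
 */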
static void __init apply_trace_boot_options(void)
{
	char *buf = trace_boot_options_buf;
	char *option;

	while (true) {
		option = strsep(&buf, ",");

		if (!option)
			break;

		if (*option)
			trace_set_options(&global_trace, option);

		/* Put back the comma to allow this to be called again */
		if (buf)
			*(buf - 1) = ',';
	}
}

static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};

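/*
 * Quick-reference help text.  tracing_readme_read() below serves this
 * string as a read-only blob, presumably through the "README" file in
 * the tracefs directory.
 */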
static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	" trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t To clear the buffer write into this file: echo > trace\n"
	" trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	" current_tracer\t- function and latency tracers\n"
	" available_tracers\t- list of configured tracers for current_tracer\n"
	" error_log\t- error log for failed commands (that support it)\n"
	" buffer_size_kb\t- view and modify size of per cpu buffer\n"
	" buffer_total_size_kb - view total size of all cpu buffers\n\n"
	" trace_clock\t\t- change the clock used to order events\n"
	" local: Per cpu clock but may not be synced across CPUs\n"
	" global: Synced across CPUs but slows tracing down.\n"
	" counter: Not a clock, but just an increment\n"
	" uptime: Jiffy counter from time of boot\n"
	" perf: Same clock that perf events use\n"
#ifdef CONFIG_X86_64
	" x86-tsc: TSC cycle counter\n"
#endif
	"\n timestamp_mode\t- view the mode used to timestamp events\n"
	" delta: Delta difference against a buffer-wide timestamp\n"
	" absolute: Absolute (standalone) timestamp\n"
	"\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
	"\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
	" tracing_cpumask\t- Limit which CPUs to trace\n"
	" instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t Remove sub-buffer with rmdir\n"
	" trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t Disable an option by prefixing 'no' to the\n"
	"\t\t\t option name\n"
	" saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n available_filter_functions - list of functions that can be filtered on\n"
	" set_ftrace_filter\t- echo function name in here to only trace these\n"
	"\t\t\t functions\n"
	"\t accepts: func_full_name or glob-matching-pattern\n"
	"\t modules: Can select a group via module\n"
	"\t Format: :mod:<module-name>\n"
	"\t example: echo :mod:ext3 > set_ftrace_filter\n"
	"\t triggers: a command to perform when function is hit\n"
	"\t Format: <function>:<trigger>[:count]\n"
	"\t trigger: traceon, traceoff\n"
	"\t\t enable_event:<system>:<event>\n"
	"\t\t disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t snapshot\n"
#endif
	"\t\t dump\n"
	"\t\t cpudump\n"
	"\t example: echo do_fault:traceoff > set_ftrace_filter\n"
	"\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"\t The first one will disable tracing every time do_fault is hit\n"
	"\t The second will disable tracing at most 3 times when do_trap is hit\n"
	"\t The first time do trap is hit and it disables tracing, the\n"
	"\t counter will decrement to 2. If tracing is already disabled,\n"
	"\t the counter will not decrement. It only decrements when the\n"
	"\t trigger did work\n"
	"\t To remove trigger without count:\n"
	"\t echo '!<function>:<trigger>' > set_ftrace_filter\n"
	"\t To remove trigger with a count:\n"
	"\t echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
	" set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t modules: Can select a group via module command :mod:\n"
	"\t Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	" set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
	"\t\t (function)\n"
	" set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
	"\t\t (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	" set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	" set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
	" max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
	"\t\t\t snapshot buffer. Read the contents for more\n"
	"\t\t\t information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	" stack_trace\t\t- Shows the max stack trace when active\n"
	" stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t Write into this file to reset the max size (trigger a\n"
	"\t\t\t new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	" stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
	"\t\t\t traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
#ifdef CONFIG_DYNAMIC_EVENTS
	" dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
	"\t\t\t Write into this file to define/undefine new trace events.\n"
#endif
#ifdef CONFIG_KPROBE_EVENTS
	" kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
	"\t\t\t Write into this file to define/undefine new trace events.\n"
#endif
#ifdef CONFIG_UPROBE_EVENTS
	" uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
	"\t\t\t Write into this file to define/undefine new trace events.\n"
#endif
#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
	"\t accepts: event-definitions (one definition per line)\n"
	"\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
	"\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
#ifdef CONFIG_HIST_TRIGGERS
	"\t s:[synthetic/]<event> <field> [<field>]\n"
#endif
	"\t -:[<group>/]<event>\n"
#ifdef CONFIG_KPROBE_EVENTS
	"\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
	"\t place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
#endif
#ifdef CONFIG_UPROBE_EVENTS
	"\t place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
#endif
	"\t args: <name>=fetcharg[:type]\n"
	"\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	"\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
#else
	"\t $stack<index>, $stack, $retval, $comm,\n"
#endif
	"\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
	"\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
	"\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
	"\t <type>\\[<array-size>\\]\n"
#ifdef CONFIG_HIST_TRIGGERS
	"\t field: <stype> <name>;\n"
	"\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
	"\t [unsigned] char/int/long\n"
#endif
#endif
	" events/\t\t- Directory containing all trace event subsystems:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	" events/<system>/\t- Directory containing all trace events for <system>:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"\t\t\t events\n"
	" filter\t\t- If set, only events passing filter are traced\n"
	" events/<system>/<event>/\t- Directory containing control files for\n"
	"\t\t\t <event>:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	" filter\t\t- If set, only events passing filter are traced\n"
	" trigger\t\t- If set, a command to perform when event is hit\n"
	"\t Format: <trigger>[:count][if <filter>]\n"
	"\t trigger: traceon, traceoff\n"
	"\t enable_event:<system>:<event>\n"
	"\t disable_event:<system>:<event>\n"
#ifdef CONFIG_HIST_TRIGGERS
	"\t enable_hist:<system>:<event>\n"
	"\t disable_hist:<system>:<event>\n"
#endif
#ifdef CONFIG_STACKTRACE
	"\t\t stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t snapshot\n"
#endif
#ifdef CONFIG_HIST_TRIGGERS
	"\t\t hist (see below)\n"
#endif
	"\t example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t events/block/block_unplug/trigger\n"
	"\t The first disables tracing every time block_unplug is hit.\n"
	"\t The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
	"\t Like function triggers, the counter is only decremented if it\n"
	"\t enabled or disabled tracing.\n"
	"\t To remove a trigger without a count:\n"
	"\t echo '!<trigger>' > <system>/<event>/trigger\n"
	"\t To remove a trigger with a count:\n"
	"\t echo '!<trigger>:0' > <system>/<event>/trigger\n"
	"\t Filters can be ignored when removing a trigger.\n"
#ifdef CONFIG_HIST_TRIGGERS
	" hist trigger\t- If set, event hits are aggregated into a hash table\n"
	"\t Format: hist:keys=<field1[,field2,...]>\n"
	"\t [:values=<field1[,field2,...]>]\n"
	"\t [:sort=<field1[,field2,...]>]\n"
	"\t [:size=#entries]\n"
	"\t [:pause][:continue][:clear]\n"
	"\t [:name=histname1]\n"
	"\t [:<handler>.<action>]\n"
	"\t [if <filter>]\n\n"
	"\t When a matching event is hit, an entry is added to a hash\n"
	"\t table using the key(s) and value(s) named, and the value of a\n"
	"\t sum called 'hitcount' is incremented. Keys and values\n"
	"\t correspond to fields in the event's format description. Keys\n"
	"\t can be any field, or the special string 'stacktrace'.\n"
	"\t Compound keys consisting of up to two fields can be specified\n"
	"\t by the 'keys' keyword. Values must correspond to numeric\n"
	"\t fields. Sort keys consisting of up to two fields can be\n"
	"\t specified using the 'sort' keyword. The sort direction can\n"
	"\t be modified by appending '.descending' or '.ascending' to a\n"
	"\t sort field. The 'size' parameter can be used to specify more\n"
	"\t or fewer than the default 2048 entries for the hashtable size.\n"
	"\t If a hist trigger is given a name using the 'name' parameter,\n"
	"\t its histogram data will be shared with other triggers of the\n"
	"\t same name, and trigger hits will update this common data.\n\n"
	"\t Reading the 'hist' file for the event will dump the hash\n"
	"\t table in its entirety to stdout. If there are multiple hist\n"
	"\t triggers attached to an event, there will be a table for each\n"
	"\t trigger in the output. The table displayed for a named\n"
	"\t trigger will be the same as any other instance having the\n"
	"\t same name. The default format used to display a given field\n"
	"\t can be modified by appending any of the following modifiers\n"
	"\t to the field name, as applicable:\n\n"
	"\t .hex display a number as a hex value\n"
	"\t .sym display an address as a symbol\n"
	"\t .sym-offset display an address as a symbol and offset\n"
	"\t .execname display a common_pid as a program name\n"
	"\t .syscall display a syscall id as a syscall name\n"
	"\t .log2 display log2 value rather than raw number\n"
	"\t .usecs display a common_timestamp in microseconds\n\n"
	"\t The 'pause' parameter can be used to pause an existing hist\n"
	"\t trigger or to start a hist trigger but not log any events\n"
	"\t until told to do so. 'continue' can be used to start or\n"
	"\t restart a paused hist trigger.\n\n"
	"\t The 'clear' parameter will clear the contents of a running\n"
	"\t hist trigger and leave its current paused/active state\n"
	"\t unchanged.\n\n"
	"\t The enable_hist and disable_hist triggers can be used to\n"
	"\t have one event conditionally start and stop another event's\n"
	"\t already-attached hist trigger. The syntax is analogous to\n"
	"\t the enable_event and disable_event triggers.\n\n"
	"\t Hist trigger handlers and actions are executed whenever a\n"
	"\t histogram entry is added or updated. They take the form:\n\n"
	"\t <handler>.<action>\n\n"
	"\t The available handlers are:\n\n"
	"\t onmatch(matching.event) - invoke on addition or update\n"
	"\t onmax(var) - invoke if var exceeds current max\n"
	"\t onchange(var) - invoke action if var changes\n\n"
	"\t The available actions are:\n\n"
	"\t trace(<synthetic_event>,param list) - generate synthetic event\n"
	"\t save(field,...) - save current event fields\n"
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t snapshot() - snapshot the trace buffer\n\n"
#endif
#ifdef CONFIG_SYNTH_EVENTS
	" events/synthetic_events\t- Create/append/remove/show synthetic events\n"
	"\t Write into this file to define/undefine new synthetic events.\n"
	"\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
#endif
#endif
;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};

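/*
 * seq_file backend for "saved_tgids".  tgid_map is a flat array
 * indexed by pid (0..PID_MAX_DEFAULT), so an element's offset is the
 * pid and its value the recorded thread group id; each entry is
 * printed as "<pid> <tgid>".
 */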
static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
{
	int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
		if (trace_find_tgid(*ptr))
			return ptr;
	}

	return NULL;
}

static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	if (!tgid_map)
		return NULL;

	v = &tgid_map[0];
	while (l <= *pos) {
		v = saved_tgids_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_tgids_stop(struct seq_file *m, void *v)
{
}

static int saved_tgids_show(struct seq_file *m, void *v)
{
	int pid = (int *)v - tgid_map;

	seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
	return 0;
}

static const struct seq_operations tracing_saved_tgids_seq_ops = {
	.start		= saved_tgids_start,
	.stop		= saved_tgids_stop,
	.next		= saved_tgids_next,
	.show		= saved_tgids_show,
};

static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
{
	int ret;

	ret = tracing_check_open_get_tr(NULL);
	if (ret)
		return ret;

	return seq_open(filp, &tracing_saved_tgids_seq_ops);
}

static const struct file_operations tracing_saved_tgids_fops = {
	.open		= tracing_saved_tgids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

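/*
 * seq_file backend for "saved_cmdlines".  The walk over
 * map_cmdline_to_pid runs under trace_cmdline_lock with preemption
 * disabled (it is an arch spinlock), and each recorded entry is
 * printed as "<pid> <comm>".
 */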
static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}

static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	int ret;

	ret = tracing_check_open_get_tr(NULL);
	if (ret)
		return ret;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry or less than PID_MAX_DEFAULT */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};

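/*
 * Eval (enum) maps exported through "eval_map" are stored as chained
 * arrays of union trace_eval_map_item: a head item holding the module
 * and array length, the map entries themselves, then a tail item
 * pointing at the next array, as laid out by
 * trace_insert_eval_map_file() below.
 */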
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static union trace_eval_map_item *
update_eval_map(union trace_eval_map_item *ptr)
{
	if (!ptr->map.eval_string) {
		if (ptr->tail.next) {
			ptr = ptr->tail.next;
			/* Set ptr to the next real item (skip head) */
			ptr++;
		} else
			return NULL;
	}
	return ptr;
}

static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
{
	union trace_eval_map_item *ptr = v;

	/*
	 * Paranoid! If ptr points to end, we don't want to increment past it.
	 * This really should never happen.
	 */
	(*pos)++;
	ptr = update_eval_map(ptr);
	if (WARN_ON_ONCE(!ptr))
		return NULL;

	ptr++;
	ptr = update_eval_map(ptr);

	return ptr;
}

static void *eval_map_start(struct seq_file *m, loff_t *pos)
{
	union trace_eval_map_item *v;
	loff_t l = 0;

	mutex_lock(&trace_eval_mutex);

	v = trace_eval_maps;
	if (v)
		v++;

	while (v && l < *pos) {
		v = eval_map_next(m, v, &l);
	}

	return v;
}

static void eval_map_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&trace_eval_mutex);
}

static int eval_map_show(struct seq_file *m, void *v)
{
	union trace_eval_map_item *ptr = v;

	seq_printf(m, "%s %ld (%s)\n",
		   ptr->map.eval_string, ptr->map.eval_value,
		   ptr->map.system);

	return 0;
}

static const struct seq_operations tracing_eval_map_seq_ops = {
	.start		= eval_map_start,
	.next		= eval_map_next,
	.stop		= eval_map_stop,
	.show		= eval_map_show,
};

static int tracing_eval_map_open(struct inode *inode, struct file *filp)
{
	int ret;

	ret = tracing_check_open_get_tr(NULL);
	if (ret)
		return ret;

	return seq_open(filp, &tracing_eval_map_seq_ops);
}

static const struct file_operations tracing_eval_map_fops = {
	.open		= tracing_eval_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static inline union trace_eval_map_item *
trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
{
	/* Return tail of array given the head */
	return ptr + ptr->head.length + 1;
}

static void
trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
			   int len)
{
	struct trace_eval_map **stop;
	struct trace_eval_map **map;
	union trace_eval_map_item *map_array;
	union trace_eval_map_item *ptr;

	stop = start + len;

	/*
	 * The trace_eval_maps contains the map plus a head and tail item,
	 * where the head holds the module and length of array, and the
	 * tail holds a pointer to the next list.
	 */
	map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
	if (!map_array) {
		pr_warn("Unable to allocate trace eval mapping\n");
		return;
	}

	mutex_lock(&trace_eval_mutex);

	if (!trace_eval_maps)
		trace_eval_maps = map_array;
	else {
		ptr = trace_eval_maps;
		for (;;) {
			ptr = trace_eval_jmp_to_tail(ptr);
			if (!ptr->tail.next)
				break;
			ptr = ptr->tail.next;
		}
		ptr->tail.next = map_array;
	}
	map_array->head.mod = mod;
	map_array->head.length = len;
	map_array++;

	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
		map_array->map = **map;
		map_array++;
	}
	memset(map_array, 0, sizeof(*map_array));

	mutex_unlock(&trace_eval_mutex);
}

static void trace_create_eval_file(struct dentry *d_tracer)
{
	trace_create_file("eval_map", 0444, d_tracer,
			  NULL, &tracing_eval_map_fops);
}

#else /* CONFIG_TRACE_EVAL_MAP_FILE */
static inline void trace_create_eval_file(struct dentry *d_tracer) { }
static inline void trace_insert_eval_map_file(struct module *mod,
			      struct trace_eval_map **start, int len) { }
#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */

static void trace_insert_eval_map(struct module *mod,
				  struct trace_eval_map **start, int len)
{
	struct trace_eval_map **map;

	if (len <= 0)
		return;

	map = start;

	trace_event_eval_update(map, len);

	trace_insert_eval_map_file(mod, start, len);
}

Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09006022static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006023tracing_set_trace_read(struct file *filp, char __user *ubuf,
6024 size_t cnt, loff_t *ppos)
6025{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006026 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08006027 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006028 int r;
6029
6030 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006031 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006032 mutex_unlock(&trace_types_lock);
6033
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006034 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006035}
6036
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02006037int tracer_init(struct tracer *t, struct trace_array *tr)
6038{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006039 tracing_reset_online_cpus(&tr->array_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02006040 return t->init(tr);
6041}
6042
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006043static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006044{
6045 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006046
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006047 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006048 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006049}
6050
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006051#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09006052/* resize @trace_buf's per-CPU entry counts to match @size_buf's entries */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006053static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6054 struct array_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09006055{
6056 int cpu, ret = 0;
6057
6058 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6059 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006060 ret = ring_buffer_resize(trace_buf->buffer,
6061 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09006062 if (ret < 0)
6063 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006064 per_cpu_ptr(trace_buf->data, cpu)->entries =
6065 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09006066 }
6067 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006068 ret = ring_buffer_resize(trace_buf->buffer,
6069 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09006070 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006071 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6072 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09006073 }
6074
6075 return ret;
6076}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006077#endif /* CONFIG_TRACER_MAX_TRACE */
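/*
 * Usage sketch: __tracing_resize_ring_buffer() below relies on this
 * helper to roll a buffer back to its own recorded per-CPU entry
 * counts when a paired resize fails, e.g.:
 *
 *	r = resize_buffer_duplicate_size(&tr->array_buffer,
 *					 &tr->array_buffer, cpu);
 *
 * keeping array_buffer and max_buffer the same size so they stay
 * swappable for snapshots.
 */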
Hiraku Toyookad60da502012-10-17 11:56:16 +09006078
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006079static int __tracing_resize_ring_buffer(struct trace_array *tr,
6080 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04006081{
6082 int ret;
6083
6084 /*
6085	 * If the kernel or the user changes the size of the ring buffer,
Steven Rostedta123c522009-03-12 11:21:08 -04006086 * we use the size that was given, and we can forget about
6087 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04006088 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006089 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04006090
Steven Rostedtb382ede62012-10-10 21:44:34 -04006091 /* May be called before buffers are initialized */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006092 if (!tr->array_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04006093 return 0;
6094
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006095 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04006096 if (ret < 0)
6097 return ret;
6098
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006099#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006100 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
6101 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09006102 goto out;
6103
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006104 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04006105 if (ret < 0) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006106 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6107 &tr->array_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04006108 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04006109 /*
6110	 * AARGH! We are left with a different
6111	 * size max buffer!!!!
6112	 * The max buffer is our "snapshot" buffer.
6113	 * When a tracer needs a snapshot (one of the
6114	 * latency tracers), it swaps the max buffer
6115	 * with the saved snapshot. We succeeded in
6116	 * updating the size of the main buffer, but failed to
6117 * update the size of the max buffer. But when we tried
6118 * to reset the main buffer to the original size, we
6119 * failed there too. This is very unlikely to
6120 * happen, but if it does, warn and kill all
6121 * tracing.
6122 */
Steven Rostedt73c51622009-03-11 13:42:01 -04006123 WARN_ON(1);
6124 tracing_disabled = 1;
6125 }
6126 return ret;
6127 }
6128
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006129 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006130 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006131 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006132 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006133
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09006134 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006135#endif /* CONFIG_TRACER_MAX_TRACE */
6136
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006137 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006138 set_buffer_entries(&tr->array_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006139 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006140 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04006141
6142 return ret;
6143}
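/*
 * Summary of the ordering above: the main array_buffer is resized
 * first; max_buffer is only resized when the current tracer uses it,
 * and a failure there triggers the rollback via
 * resize_buffer_duplicate_size() so both buffers keep matching sizes
 * (a precondition for the snapshot buffer swap).
 */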
6144
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09006145ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6146 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006147{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07006148 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006149
6150 mutex_lock(&trace_types_lock);
6151
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006152 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6153 /* make sure, this cpu is enabled in the mask */
6154 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6155 ret = -EINVAL;
6156 goto out;
6157 }
6158 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006159
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006160 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006161 if (ret < 0)
6162 ret = -ENOMEM;
6163
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006164out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006165 mutex_unlock(&trace_types_lock);
6166
6167 return ret;
6168}
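/*
 * Kernel-internal usage sketch (illustrative): give CPU 0 of the
 * global instance one megabyte of entries, or resize every CPU at
 * once with RING_BUFFER_ALL_CPUS:
 *
 *	ret = tracing_resize_ring_buffer(&global_trace, 1024 * 1024, 0);
 *	ret = tracing_resize_ring_buffer(&global_trace, 1024 * 1024,
 *					 RING_BUFFER_ALL_CPUS);
 */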
6169
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09006170
Steven Rostedt1852fcc2009-03-11 14:33:00 -04006171/**
6172 * tracing_update_buffers - used by tracing facility to expand ring buffers
6173 *
6174	 * To save memory when tracing is never used on a system where it is
6175	 * configured in, the ring buffers start at a minimum size. But once
6176	 * a user starts to use the tracing facility, they need to grow
6177	 * to their default size.
6178 *
6179 * This function is to be called when a tracer is about to be used.
6180 */
6181int tracing_update_buffers(void)
6182{
6183 int ret = 0;
6184
Steven Rostedt1027fcb2009-03-12 11:33:20 -04006185 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04006186 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006187 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006188 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04006189 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04006190
6191 return ret;
6192}
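/*
 * Typical caller pattern (a sketch; the callers live elsewhere in
 * the tracing code): expand the boot-time minimal buffers before the
 * first real user starts recording:
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 */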
6193
Steven Rostedt577b7852009-02-26 23:43:05 -05006194struct trace_option_dentry;
6195
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006196static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006197create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05006198
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05006199/*
6200 * Used to clear out the tracer before deletion of an instance.
6201 * Must have trace_types_lock held.
6202 */
6203static void tracing_set_nop(struct trace_array *tr)
6204{
6205 if (tr->current_trace == &nop_trace)
6206 return;
6207
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05006208 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05006209
6210 if (tr->current_trace->reset)
6211 tr->current_trace->reset(tr);
6212
6213 tr->current_trace = &nop_trace;
6214}
6215
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04006216static void add_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006217{
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05006218 /* Only enable if the directory has been created already. */
6219 if (!tr->dir)
6220 return;
6221
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006222 create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05006223}
6224
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09006225int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05006226{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006227 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006228#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05006229 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006230#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01006231 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006232
Steven Rostedt1027fcb2009-03-12 11:33:20 -04006233 mutex_lock(&trace_types_lock);
6234
Steven Rostedt73c51622009-03-11 13:42:01 -04006235 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006236 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006237 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04006238 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01006239 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04006240 ret = 0;
6241 }
6242
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006243 for (t = trace_types; t; t = t->next) {
6244 if (strcmp(t->name, buf) == 0)
6245 break;
6246 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02006247 if (!t) {
6248 ret = -EINVAL;
6249 goto out;
6250 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006251 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006252 goto out;
6253
Tom Zanussia35873a2019-02-13 17:42:45 -06006254#ifdef CONFIG_TRACER_SNAPSHOT
6255 if (t->use_max_tr) {
6256 arch_spin_lock(&tr->max_lock);
6257 if (tr->cond_snapshot)
6258 ret = -EBUSY;
6259 arch_spin_unlock(&tr->max_lock);
6260 if (ret)
6261 goto out;
6262 }
6263#endif
Ziqian SUN (Zamir)c7b3ae02017-09-11 14:26:35 +08006264	/* Some tracers won't work on the kernel command line */
6265 if (system_state < SYSTEM_RUNNING && t->noboot) {
6266 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6267 t->name);
6268 goto out;
6269 }
6270
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05006271 /* Some tracers are only allowed for the top level buffer */
6272 if (!trace_ok_for_array(t, tr)) {
6273 ret = -EINVAL;
6274 goto out;
6275 }
6276
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006277 /* If trace pipe files are being read, we can't change the tracer */
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04006278 if (tr->trace_ref) {
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006279 ret = -EBUSY;
6280 goto out;
6281 }
6282
Steven Rostedt9f029e82008-11-12 15:24:24 -05006283 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04006284
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05006285 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04006286
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006287 if (tr->current_trace->reset)
6288 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05006289
Paul E. McKenney74401722018-11-06 18:44:52 -08006290 /* Current trace needs to be nop_trace before synchronize_rcu */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006291 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05006292
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05006293#ifdef CONFIG_TRACER_MAX_TRACE
6294 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05006295
6296 if (had_max_tr && !t->use_max_tr) {
6297 /*
6298 * We need to make sure that the update_max_tr sees that
6299 * current_trace changed to nop_trace to keep it from
6300 * swapping the buffers after we resize it.
6301	 * update_max_tr() is called with interrupts disabled,
6302	 * so a synchronize_rcu() is sufficient.
6303 */
Paul E. McKenney74401722018-11-06 18:44:52 -08006304 synchronize_rcu();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04006305 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09006306 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006307#endif
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006308
6309#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05006310 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04006311 ret = tracing_alloc_snapshot_instance(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09006312 if (ret < 0)
6313 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09006314 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006315#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05006316
Frederic Weisbecker1c800252008-11-16 05:57:26 +01006317 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02006318 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01006319 if (ret)
6320 goto out;
6321 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006322
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006323 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05006324 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05006325 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006326 out:
6327 mutex_unlock(&trace_types_lock);
6328
Peter Zijlstrad9e54072008-11-01 19:57:37 +01006329 return ret;
6330}
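/*
 * This is the backend of the "current_tracer" tracefs file (see
 * tracing_set_trace_write() below).  A minimal userspace sketch,
 * assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *	int fd = open("/sys/kernel/tracing/current_tracer", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "nop", 3);
 *		close(fd);
 *	}
 */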
6331
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006332static ssize_t
6333tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6334 size_t cnt, loff_t *ppos)
6335{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05006336 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08006337 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006338 int i;
6339 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01006340 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006341
Steven Rostedt60063a62008-10-28 10:44:24 -04006342 ret = cnt;
6343
Li Zefanee6c2c12009-09-18 14:06:47 +08006344 if (cnt > MAX_TRACER_SIZE)
6345 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006346
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08006347 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006348 return -EFAULT;
6349
6350 buf[cnt] = 0;
6351
6352	 /* strip trailing whitespace. */
6353 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6354 buf[i] = 0;
6355
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05006356 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01006357 if (err)
6358 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006359
Jiri Olsacf8517c2009-10-23 19:36:16 -04006360 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006361
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02006362 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006363}
6364
6365static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006366tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6367 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006368{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006369 char buf[64];
6370 int r;
6371
Steven Rostedtcffae432008-05-12 21:21:00 +02006372 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006373 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02006374 if (r > sizeof(buf))
6375 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006376 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006377}
6378
6379static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006380tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6381 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006382{
Hannes Eder5e398412009-02-10 19:44:34 +01006383 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006384 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006385
Peter Huewe22fe9b52011-06-07 21:58:27 +02006386 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6387 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006388 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006389
6390 *ptr = val * 1000;
6391
6392 return cnt;
6393}
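/*
 * Units note: these helpers store nanoseconds but expose microseconds.
 * Writing "100" stores 100 * 1000 = 100000 ns; reading converts back
 * with nsecs_to_usecs() and prints "100" again.
 */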
6394
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006395static ssize_t
6396tracing_thresh_read(struct file *filp, char __user *ubuf,
6397 size_t cnt, loff_t *ppos)
6398{
6399 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6400}
6401
6402static ssize_t
6403tracing_thresh_write(struct file *filp, const char __user *ubuf,
6404 size_t cnt, loff_t *ppos)
6405{
6406 struct trace_array *tr = filp->private_data;
6407 int ret;
6408
6409 mutex_lock(&trace_types_lock);
6410 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6411 if (ret < 0)
6412 goto out;
6413
6414 if (tr->current_trace->update_thresh) {
6415 ret = tr->current_trace->update_thresh(tr);
6416 if (ret < 0)
6417 goto out;
6418 }
6419
6420 ret = cnt;
6421out:
6422 mutex_unlock(&trace_types_lock);
6423
6424 return ret;
6425}
6426
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04006427#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Chen Gange428abb2015-11-10 05:15:15 +08006428
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006429static ssize_t
6430tracing_max_lat_read(struct file *filp, char __user *ubuf,
6431 size_t cnt, loff_t *ppos)
6432{
6433 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6434}
6435
6436static ssize_t
6437tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6438 size_t cnt, loff_t *ppos)
6439{
6440 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6441}
6442
Chen Gange428abb2015-11-10 05:15:15 +08006443#endif
6444
Steven Rostedtb3806b42008-05-12 21:20:46 +02006445static int tracing_open_pipe(struct inode *inode, struct file *filp)
6446{
Oleg Nesterov15544202013-07-23 17:25:57 +02006447 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006448 struct trace_iterator *iter;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006449 int ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006450
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006451 ret = tracing_check_open_get_tr(tr);
6452 if (ret)
6453 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006454
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006455 mutex_lock(&trace_types_lock);
6456
Steven Rostedtb3806b42008-05-12 21:20:46 +02006457 /* create a buffer to store the information to pass to userspace */
6458 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006459 if (!iter) {
6460 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006461 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006462 goto out;
6463 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02006464
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04006465 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006466 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006467
6468 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6469 ret = -ENOMEM;
6470 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10306471 }
6472
Steven Rostedta3097202008-11-07 22:36:02 -05006473 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10306474 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05006475
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006476 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04006477 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6478
David Sharp8be07092012-11-13 12:18:22 -08006479 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09006480 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08006481 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6482
Oleg Nesterov15544202013-07-23 17:25:57 +02006483 iter->tr = tr;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006484 iter->array_buffer = &tr->array_buffer;
Oleg Nesterov15544202013-07-23 17:25:57 +02006485 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006486 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006487 filp->private_data = iter;
6488
Steven Rostedt107bad82008-05-12 21:21:01 +02006489 if (iter->trace->pipe_open)
6490 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02006491
Arnd Bergmannb4447862010-07-07 23:40:11 +02006492 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006493
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04006494 tr->trace_ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006495out:
6496 mutex_unlock(&trace_types_lock);
6497 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006498
6499fail:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006500 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006501 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006502 mutex_unlock(&trace_types_lock);
6503 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006504}
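/*
 * Note: each successful open above bumps tr->trace_ref, which makes
 * tracing_set_tracer() fail with -EBUSY; a consuming trace_pipe
 * reader and a tracer switch cannot overlap.  The reference is
 * dropped in tracing_release_pipe() below.
 */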
6505
6506static int tracing_release_pipe(struct inode *inode, struct file *file)
6507{
6508 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02006509 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006510
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006511 mutex_lock(&trace_types_lock);
6512
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04006513 tr->trace_ref--;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006514
Steven Rostedt29bf4a52009-12-09 12:37:43 -05006515 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05006516 iter->trace->pipe_close(iter);
6517
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006518 mutex_unlock(&trace_types_lock);
6519
Rusty Russell44623442009-01-01 10:12:23 +10306520 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006521 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006522 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006523
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006524 trace_array_put(tr);
6525
Steven Rostedtb3806b42008-05-12 21:20:46 +02006526 return 0;
6527}
6528
Al Viro9dd95742017-07-03 00:42:43 -04006529static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006530trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006531{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006532 struct trace_array *tr = iter->tr;
6533
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006534 /* Iterators are static, they should be filled or empty */
6535 if (trace_buffer_iter(iter, iter->cpu_file))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08006536 return EPOLLIN | EPOLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006537
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006538 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006539 /*
6540 * Always select as readable when in blocking mode
6541 */
Linus Torvaldsa9a08842018-02-11 14:34:03 -08006542 return EPOLLIN | EPOLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006543 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006544 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006545 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006546}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006547
Al Viro9dd95742017-07-03 00:42:43 -04006548static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006549tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6550{
6551 struct trace_iterator *iter = filp->private_data;
6552
6553 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006554}
6555
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006556/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006557static int tracing_wait_pipe(struct file *filp)
6558{
6559 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006560 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006561
6562 while (trace_empty(iter)) {
6563
6564 if ((filp->f_flags & O_NONBLOCK)) {
6565 return -EAGAIN;
6566 }
6567
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006568 /*
Liu Bo250bfd32013-01-14 10:54:11 +08006569 * We block until we read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006570 * We still block if tracing is disabled, but we have never
6571 * read anything. This allows a user to cat this file, and
6572 * then enable tracing. But after we have read something,
6573 * we give an EOF when tracing is again disabled.
6574 *
6575 * iter->pos will be 0 if we haven't read anything.
6576 */
Tahsin Erdogan75df6e62017-09-17 03:23:48 -07006577 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006578 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04006579
6580 mutex_unlock(&iter->mutex);
6581
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05006582 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04006583
6584 mutex_lock(&iter->mutex);
6585
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006586 if (ret)
6587 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006588 }
6589
6590 return 1;
6591}
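/*
 * Resulting semantics (illustrative): with tracing enabled, a reader
 * of trace_pipe blocks in wait_on_pipe() until data arrives; once it
 * has read something (iter->pos != 0) and tracing is turned off, the
 * loop breaks and the reader sees EOF.
 */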
6592
Steven Rostedtb3806b42008-05-12 21:20:46 +02006593/*
6594 * Consumer reader.
6595 */
6596static ssize_t
6597tracing_read_pipe(struct file *filp, char __user *ubuf,
6598 size_t cnt, loff_t *ppos)
6599{
6600 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006601 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006602
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006603 /*
6604 * Avoid more than one consumer on a single file descriptor
6605	 * This is just a matter of trace coherency; the ring buffer itself
6606 * is protected.
6607 */
6608 mutex_lock(&iter->mutex);
Steven Rostedt (Red Hat)12458002016-09-23 22:57:13 -04006609
6610 /* return any leftover data */
6611 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6612 if (sret != -EBUSY)
6613 goto out;
6614
6615 trace_seq_init(&iter->seq);
6616
Steven Rostedt107bad82008-05-12 21:21:01 +02006617 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006618 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6619 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02006620 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02006621 }
6622
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006623waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006624 sret = tracing_wait_pipe(filp);
6625 if (sret <= 0)
6626 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006627
6628 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006629 if (trace_empty(iter)) {
6630 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02006631 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006632 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02006633
6634 if (cnt >= PAGE_SIZE)
6635 cnt = PAGE_SIZE - 1;
6636
Steven Rostedt53d0aa72008-05-12 21:21:01 +02006637 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02006638 memset(&iter->seq, 0,
6639 sizeof(struct trace_iterator) -
6640 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04006641 cpumask_clear(iter->started);
Petr Mladekd303de12019-10-11 16:21:34 +02006642 trace_seq_init(&iter->seq);
Steven Rostedt4823ed72008-05-12 21:21:01 +02006643 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006644
Lai Jiangshan4f535962009-05-18 19:35:34 +08006645 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006646 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05006647 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006648 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006649 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006650
Ingo Molnarf9896bf2008-05-12 21:20:47 +02006651 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006652 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02006653 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006654 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006655 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006656 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01006657 if (ret != TRACE_TYPE_NO_CONSUME)
6658 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006659
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006660 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02006661 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01006662
6663 /*
6664 * Setting the full flag means we reached the trace_seq buffer
6665 * size and we should leave by partial output condition above.
6666 * One of the trace_seq_* functions is not used properly.
6667 */
6668 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6669 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006670 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006671 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006672 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02006673
Steven Rostedtb3806b42008-05-12 21:20:46 +02006674 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006675 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006676 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05006677 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006678
6679 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006680 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006681 * entries, go back to wait for more entries.
6682 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006683 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006684 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006685
Steven Rostedt107bad82008-05-12 21:21:01 +02006686out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006687 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02006688
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006689 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006690}
6691
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006692static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6693 unsigned int idx)
6694{
6695 __free_page(spd->pages[idx]);
6696}
6697
Steven Rostedt34cd4992009-02-09 12:06:29 -05006698static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006699tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006700{
6701 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006702 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006703 int ret;
6704
6705 /* Seq buffer is page-sized, exactly what we need. */
6706 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006707 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006708 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006709
6710 if (trace_seq_has_overflowed(&iter->seq)) {
6711 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006712 break;
6713 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006714
6715 /*
6716 * This should not be hit, because it should only
6717 * be set if the iter->seq overflowed. But check it
6718 * anyway to be safe.
6719 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05006720 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006721 iter->seq.seq.len = save_len;
6722 break;
6723 }
6724
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006725 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006726 if (rem < count) {
6727 rem = 0;
6728 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006729 break;
6730 }
6731
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08006732 if (ret != TRACE_TYPE_NO_CONSUME)
6733 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05006734 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05006735 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05006736 rem = 0;
6737 iter->ent = NULL;
6738 break;
6739 }
6740 }
6741
6742 return rem;
6743}
6744
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006745static ssize_t tracing_splice_read_pipe(struct file *filp,
6746 loff_t *ppos,
6747 struct pipe_inode_info *pipe,
6748 size_t len,
6749 unsigned int flags)
6750{
Jens Axboe35f3d142010-05-20 10:43:18 +02006751 struct page *pages_def[PIPE_DEF_BUFFERS];
6752 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006753 struct trace_iterator *iter = filp->private_data;
6754 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02006755 .pages = pages_def,
6756 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006757 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02006758 .nr_pages_max = PIPE_DEF_BUFFERS,
Christoph Hellwig6797d972020-05-20 17:58:13 +02006759 .ops = &default_pipe_buf_ops,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006760 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006761 };
6762 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006763 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006764 unsigned int i;
6765
Jens Axboe35f3d142010-05-20 10:43:18 +02006766 if (splice_grow_spd(pipe, &spd))
6767 return -ENOMEM;
6768
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006769 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006770
6771 if (iter->trace->splice_read) {
6772 ret = iter->trace->splice_read(iter, filp,
6773 ppos, pipe, len, flags);
6774 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006775 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006776 }
6777
6778 ret = tracing_wait_pipe(filp);
6779 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006780 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006781
Jason Wessel955b61e2010-08-05 09:22:23 -05006782 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006783 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006784 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006785 }
6786
Lai Jiangshan4f535962009-05-18 19:35:34 +08006787 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006788 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006789
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006790 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04006791 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006792 spd.pages[i] = alloc_page(GFP_KERNEL);
6793 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05006794 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006795
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006796 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006797
6798 /* Copy the data into the page, so we can start over. */
6799 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02006800 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006801 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006802 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006803 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006804 break;
6805 }
Jens Axboe35f3d142010-05-20 10:43:18 +02006806 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006807 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006808
Steven Rostedtf9520752009-03-02 14:04:40 -05006809 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006810 }
6811
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006812 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006813 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006814 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006815
6816 spd.nr_pages = i;
6817
Steven Rostedt (Red Hat)a29054d92016-03-18 15:46:48 -04006818 if (i)
6819 ret = splice_to_pipe(pipe, &spd);
6820 else
6821 ret = 0;
Jens Axboe35f3d142010-05-20 10:43:18 +02006822out:
Eric Dumazet047fe362012-06-12 15:24:40 +02006823 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02006824 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006825
Steven Rostedt34cd4992009-02-09 12:06:29 -05006826out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006827 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02006828 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006829}
6830
Steven Rostedta98a3c32008-05-12 21:20:59 +02006831static ssize_t
6832tracing_entries_read(struct file *filp, char __user *ubuf,
6833 size_t cnt, loff_t *ppos)
6834{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006835 struct inode *inode = file_inode(filp);
6836 struct trace_array *tr = inode->i_private;
6837 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006838 char buf[64];
6839 int r = 0;
6840 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006841
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006842 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006843
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006844 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006845 int cpu, buf_size_same;
6846 unsigned long size;
6847
6848 size = 0;
6849 buf_size_same = 1;
6850 /* check if all cpu sizes are same */
6851 for_each_tracing_cpu(cpu) {
6852 /* fill in the size from first enabled cpu */
6853 if (size == 0)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006854 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6855 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006856 buf_size_same = 0;
6857 break;
6858 }
6859 }
6860
6861 if (buf_size_same) {
6862 if (!ring_buffer_expanded)
6863 r = sprintf(buf, "%lu (expanded: %lu)\n",
6864 size >> 10,
6865 trace_buf_size >> 10);
6866 else
6867 r = sprintf(buf, "%lu\n", size >> 10);
6868 } else
6869 r = sprintf(buf, "X\n");
6870 } else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006871 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006872
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006873 mutex_unlock(&trace_types_lock);
6874
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006875 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6876 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006877}
6878
6879static ssize_t
6880tracing_entries_write(struct file *filp, const char __user *ubuf,
6881 size_t cnt, loff_t *ppos)
6882{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006883 struct inode *inode = file_inode(filp);
6884 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006885 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006886 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006887
Peter Huewe22fe9b52011-06-07 21:58:27 +02006888 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6889 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006890 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006891
6892 /* must have at least 1 entry */
6893 if (!val)
6894 return -EINVAL;
6895
Steven Rostedt1696b2b2008-11-13 00:09:35 -05006896 /* value is in KB */
6897 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006898 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006899 if (ret < 0)
6900 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006901
Jiri Olsacf8517c2009-10-23 19:36:16 -04006902 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006903
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006904 return cnt;
6905}
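/*
 * Userspace sketch (illustrative): the written value is in KB, per
 * the "value is in KB" shift above.  Requesting 4 MB per CPU,
 * assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *	int fd = open("/sys/kernel/tracing/buffer_size_kb", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "4096", 4);
 *		close(fd);
 *	}
 */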
Steven Rostedtbf5e6512008-11-10 21:46:00 -05006906
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006907static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006908tracing_total_entries_read(struct file *filp, char __user *ubuf,
6909 size_t cnt, loff_t *ppos)
6910{
6911 struct trace_array *tr = filp->private_data;
6912 char buf[64];
6913 int r, cpu;
6914 unsigned long size = 0, expanded_size = 0;
6915
6916 mutex_lock(&trace_types_lock);
6917 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006918 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07006919 if (!ring_buffer_expanded)
6920 expanded_size += trace_buf_size >> 10;
6921 }
6922 if (ring_buffer_expanded)
6923 r = sprintf(buf, "%lu\n", size);
6924 else
6925 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6926 mutex_unlock(&trace_types_lock);
6927
6928 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6929}
6930
6931static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006932tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6933 size_t cnt, loff_t *ppos)
6934{
6935 /*
6936	 * There is no need to read what the user has written; this function
6937	 * just makes sure that there is no error when "echo" is used.
6938 */
6939
6940 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006941
6942 return cnt;
6943}
6944
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006945static int
6946tracing_free_buffer_release(struct inode *inode, struct file *filp)
6947{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006948 struct trace_array *tr = inode->i_private;
6949
Steven Rostedtcf30cf62011-06-14 22:44:07 -04006950 /* disable tracing ? */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006951 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07006952 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006953 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006954 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006955
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006956 trace_array_put(tr);
6957
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006958 return 0;
6959}
6960
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006961static ssize_t
6962tracing_mark_write(struct file *filp, const char __user *ubuf,
6963 size_t cnt, loff_t *fpos)
6964{
Alexander Z Lam2d716192013-07-01 15:31:24 -07006965 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04006966 struct ring_buffer_event *event;
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04006967 enum event_trigger_type tt = ETT_NONE;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05006968 struct trace_buffer *buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04006969 struct print_entry *entry;
Steven Rostedtd696b582011-09-22 11:50:27 -04006970 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04006971 int size;
6972 int len;
Steven Rostedtfa32e852016-07-06 15:25:08 -04006973
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006974/* Used in tracing_mark_raw_write() as well */
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01006975#define FAULTED_STR "<faulted>"
6976#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006977
Steven Rostedtc76f0692008-11-07 22:36:02 -05006978 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006979 return -EINVAL;
6980
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006981 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07006982 return -EINVAL;
6983
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006984 if (cnt > TRACE_BUF_SIZE)
6985 cnt = TRACE_BUF_SIZE;
6986
Steven Rostedtd696b582011-09-22 11:50:27 -04006987 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006988
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006989 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6990
6991 /* If less than "<faulted>", then make sure we can still add that */
6992 if (cnt < FAULTED_SIZE)
6993 size += FAULTED_SIZE - cnt;
6994
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006995 buffer = tr->array_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05006996 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01006997 tracing_gen_ctx());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05006998 if (unlikely(!event))
Steven Rostedtd696b582011-09-22 11:50:27 -04006999 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007000 return -EBADF;
Steven Rostedtd696b582011-09-22 11:50:27 -04007001
7002 entry = ring_buffer_event_data(event);
7003 entry->ip = _THIS_IP_;
7004
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007005 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7006 if (len) {
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01007007 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007008 cnt = FAULTED_SIZE;
7009 written = -EFAULT;
Steven Rostedtd696b582011-09-22 11:50:27 -04007010 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007011 written = cnt;
Steven Rostedtd696b582011-09-22 11:50:27 -04007012
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04007013 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7014 /* do not add \n before testing triggers, but add \0 */
7015 entry->buf[cnt] = '\0';
Steven Rostedt (VMware)b47e3302021-03-16 12:41:03 -04007016 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04007017 }
7018
Steven Rostedtd696b582011-09-22 11:50:27 -04007019 if (entry->buf[cnt - 1] != '\n') {
7020 entry->buf[cnt] = '\n';
7021 entry->buf[cnt + 1] = '\0';
7022 } else
7023 entry->buf[cnt] = '\0';
7024
Tingwei Zhang458999c2020-10-05 10:13:15 +03007025 if (static_branch_unlikely(&trace_marker_exports_enabled))
7026 ftrace_exports(event, TRACE_EXPORT_MARKER);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04007027 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04007028
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04007029 if (tt)
7030 event_triggers_post_call(tr->trace_marker_file, tt);
7031
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007032 if (written > 0)
7033 *fpos += written;
Steven Rostedtd696b582011-09-22 11:50:27 -04007034
Steven Rostedtfa32e852016-07-06 15:25:08 -04007035 return written;
7036}
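/*
 * Minimal userspace sketch (illustrative): writing to trace_marker
 * injects a TRACE_PRINT event at the current point in the buffer,
 * assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "hello from userspace\n", 21);
 *		close(fd);
 *	}
 */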
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007037
Steven Rostedtfa32e852016-07-06 15:25:08 -04007038/* Limit it for now to 3K (including tag) */
7039#define RAW_DATA_MAX_SIZE (1024*3)
7040
7041static ssize_t
7042tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7043 size_t cnt, loff_t *fpos)
7044{
7045 struct trace_array *tr = filp->private_data;
7046 struct ring_buffer_event *event;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05007047 struct trace_buffer *buffer;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007048 struct raw_data_entry *entry;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007049 ssize_t written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007050 int size;
7051 int len;
7052
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007053#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7054
Steven Rostedtfa32e852016-07-06 15:25:08 -04007055 if (tracing_disabled)
7056 return -EINVAL;
7057
7058 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7059 return -EINVAL;
7060
7061 /* The marker must at least have a tag id */
7062 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7063 return -EINVAL;
7064
7065 if (cnt > TRACE_BUF_SIZE)
7066 cnt = TRACE_BUF_SIZE;
7067
7068 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7069
Steven Rostedtfa32e852016-07-06 15:25:08 -04007070 size = sizeof(*entry) + cnt;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007071 if (cnt < FAULT_SIZE_ID)
7072 size += FAULT_SIZE_ID - cnt;
7073
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007074 buffer = tr->array_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05007075 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01007076 tracing_gen_ctx());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007077 if (!event)
Steven Rostedtfa32e852016-07-06 15:25:08 -04007078 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007079 return -EBADF;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007080
7081 entry = ring_buffer_event_data(event);
7082
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007083 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7084 if (len) {
7085 entry->id = -1;
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01007086 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007087 written = -EFAULT;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007088 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007089 written = cnt;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007090
7091 __buffer_unlock_commit(buffer, event);
7092
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007093 if (written > 0)
7094 *fpos += written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007095
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02007096 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007097}
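/*
 * Minimal userspace sketch (illustrative): trace_marker_raw expects a
 * binary payload that begins with an int tag, as the size check above
 * enforces.  Assuming the usual tracefs mount point:
 *
 *	struct { int id; char data[8]; } rec = { 42, "payload" };
 *	int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, &rec, sizeof(rec));
 *		close(fd);
 *	}
 */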
7098
Li Zefan13f16d22009-12-08 11:16:11 +08007099static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08007100{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007101 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08007102 int i;
7103
7104 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08007105 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08007106 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007107 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7108 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08007109 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08007110
Li Zefan13f16d22009-12-08 11:16:11 +08007111 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08007112}
7113
Tom Zanussid71bd342018-01-15 20:52:07 -06007114int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08007115{
Zhaolei5079f322009-08-25 16:12:56 +08007116 int i;
7117
Zhaolei5079f322009-08-25 16:12:56 +08007118 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7119 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7120 break;
7121 }
7122 if (i == ARRAY_SIZE(trace_clocks))
7123 return -EINVAL;
7124
Zhaolei5079f322009-08-25 16:12:56 +08007125 mutex_lock(&trace_types_lock);
7126
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007127 tr->clock_id = i;
7128
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007129 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08007130
David Sharp60303ed2012-10-11 16:27:52 -07007131 /*
7132	 * The new clock may not be consistent with the previous clock.
7133 * Reset the buffer so that it doesn't have incomparable timestamps.
7134 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007135 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007136
7137#ifdef CONFIG_TRACER_MAX_TRACE
Baohong Liu170b3b12017-09-05 16:57:19 -05007138 if (tr->max_buffer.buffer)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007139 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07007140 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007141#endif
David Sharp60303ed2012-10-11 16:27:52 -07007142
Zhaolei5079f322009-08-25 16:12:56 +08007143 mutex_unlock(&trace_types_lock);
7144
Steven Rostedte1e232c2014-02-10 23:38:46 -05007145 return 0;
7146}
7147
7148static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7149 size_t cnt, loff_t *fpos)
7150{
7151 struct seq_file *m = filp->private_data;
7152 struct trace_array *tr = m->private;
7153 char buf[64];
7154 const char *clockstr;
7155 int ret;
7156
7157 if (cnt >= sizeof(buf))
7158 return -EINVAL;
7159
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08007160 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05007161 return -EFAULT;
7162
7163 buf[cnt] = 0;
7164
7165 clockstr = strstrip(buf);
7166
7167 ret = tracing_set_clock(tr, clockstr);
7168 if (ret)
7169 return ret;
7170
Zhaolei5079f322009-08-25 16:12:56 +08007171 *fpos += cnt;
7172
7173 return cnt;
7174}
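/*
 * Illustrative sketch (not part of the original file): tracing_clock_write()
 * backs the trace_clock file, so switching clocks from user space is a plain
 * write of one of the names in trace_clocks[] (e.g. "local", "global",
 * "mono", "boot"). Assumes tracefs is mounted at /sys/kernel/tracing and the
 * usual <fcntl.h>/<unistd.h> user-space headers.
 */
static void set_trace_clock_global(void)
{
	int fd = open("/sys/kernel/tracing/trace_clock", O_WRONLY);

	if (fd >= 0) {
		write(fd, "global", 6);	/* buffers are reset on a clock switch */
		close(fd);
	}
}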
7175
Li Zefan13f16d22009-12-08 11:16:11 +08007176static int tracing_clock_open(struct inode *inode, struct file *file)
7177{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007178 struct trace_array *tr = inode->i_private;
7179 int ret;
7180
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007181 ret = tracing_check_open_get_tr(tr);
7182 if (ret)
7183 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007184
7185 ret = single_open(file, tracing_clock_show, inode->i_private);
7186 if (ret < 0)
7187 trace_array_put(tr);
7188
7189 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08007190}
7191
Tom Zanussi2c1ea602018-01-15 20:51:41 -06007192static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7193{
7194 struct trace_array *tr = m->private;
7195
7196 mutex_lock(&trace_types_lock);
7197
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007198 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
Tom Zanussi2c1ea602018-01-15 20:51:41 -06007199 seq_puts(m, "delta [absolute]\n");
7200 else
7201 seq_puts(m, "[delta] absolute\n");
7202
7203 mutex_unlock(&trace_types_lock);
7204
7205 return 0;
7206}
7207
7208static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7209{
7210 struct trace_array *tr = inode->i_private;
7211 int ret;
7212
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007213 ret = tracing_check_open_get_tr(tr);
7214 if (ret)
7215 return ret;
Tom Zanussi2c1ea602018-01-15 20:51:41 -06007216
7217 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7218 if (ret < 0)
7219 trace_array_put(tr);
7220
7221 return ret;
7222}
7223
Steven Rostedt (VMware)d8279bfc2021-03-16 12:41:07 -04007224u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7225{
7226 if (rbe == this_cpu_read(trace_buffered_event))
Yordan Karadzhov (VMware)f3ef7202021-03-29 16:03:31 +03007227 return ring_buffer_time_stamp(buffer);
Steven Rostedt (VMware)d8279bfc2021-03-16 12:41:07 -04007228
7229 return ring_buffer_event_time_stamp(buffer, rbe);
7230}
7231
Steven Rostedt (VMware)b94bc802021-03-16 12:41:05 -04007232/*
7233 * Take (set) or drop (!set) a reference that prevents use of the per CPU trace_buffered_event.
7234 */
7235int tracing_set_filter_buffering(struct trace_array *tr, bool set)
Tom Zanussi00b41452018-01-15 20:51:39 -06007236{
7237 int ret = 0;
7238
7239 mutex_lock(&trace_types_lock);
7240
Steven Rostedt (VMware)b94bc802021-03-16 12:41:05 -04007241 if (set && tr->no_filter_buffering_ref++)
Tom Zanussi00b41452018-01-15 20:51:39 -06007242 goto out;
7243
Steven Rostedt (VMware)b94bc802021-03-16 12:41:05 -04007244 if (!set) {
7245 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
Tom Zanussi00b41452018-01-15 20:51:39 -06007246 ret = -EINVAL;
7247 goto out;
7248 }
7249
Steven Rostedt (VMware)b94bc802021-03-16 12:41:05 -04007250 --tr->no_filter_buffering_ref;
Tom Zanussi00b41452018-01-15 20:51:39 -06007251 }
Tom Zanussi00b41452018-01-15 20:51:39 -06007252 out:
7253 mutex_unlock(&trace_types_lock);
7254
7255 return ret;
7256}
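/*
 * Illustrative sketch (not part of the original file): because the function
 * above is reference counted, a caller that needs events committed directly
 * to the ring buffer (bypassing the per CPU trace_buffered_event) takes a
 * reference while armed and drops it on teardown. The trigger names here are
 * hypothetical.
 */
static int sketch_trigger_attach(struct trace_array *tr)
{
	/* every successful (tr, true) call must be paired with (tr, false) */
	return tracing_set_filter_buffering(tr, true);
}

static void sketch_trigger_detach(struct trace_array *tr)
{
	tracing_set_filter_buffering(tr, false);
}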
7257
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007258struct ftrace_buffer_info {
7259 struct trace_iterator iter;
7260 void *spare;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007261 unsigned int spare_cpu;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007262 unsigned int read;
7263};
7264
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007265#ifdef CONFIG_TRACER_SNAPSHOT
7266static int tracing_snapshot_open(struct inode *inode, struct file *file)
7267{
Oleg Nesterov6484c712013-07-23 17:26:10 +02007268 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007269 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007270 struct seq_file *m;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007271 int ret;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007272
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007273 ret = tracing_check_open_get_tr(tr);
7274 if (ret)
7275 return ret;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007276
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007277 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02007278 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007279 if (IS_ERR(iter))
7280 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007281 } else {
7282 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07007283 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007284 m = kzalloc(sizeof(*m), GFP_KERNEL);
7285 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07007286 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007287 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7288 if (!iter) {
7289 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07007290 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007291 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07007292 ret = 0;
7293
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007294 iter->tr = tr;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007295 iter->array_buffer = &tr->max_buffer;
Oleg Nesterov6484c712013-07-23 17:26:10 +02007296 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007297 m->private = iter;
7298 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007299 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07007300out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007301 if (ret < 0)
7302 trace_array_put(tr);
7303
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007304 return ret;
7305}
7306
7307static ssize_t
7308tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7309 loff_t *ppos)
7310{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007311 struct seq_file *m = filp->private_data;
7312 struct trace_iterator *iter = m->private;
7313 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007314 unsigned long val;
7315 int ret;
7316
7317 ret = tracing_update_buffers();
7318 if (ret < 0)
7319 return ret;
7320
7321 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7322 if (ret)
7323 return ret;
7324
7325 mutex_lock(&trace_types_lock);
7326
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007327 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007328 ret = -EBUSY;
7329 goto out;
7330 }
7331
Tom Zanussia35873a2019-02-13 17:42:45 -06007332 arch_spin_lock(&tr->max_lock);
7333 if (tr->cond_snapshot)
7334 ret = -EBUSY;
7335 arch_spin_unlock(&tr->max_lock);
7336 if (ret)
7337 goto out;
7338
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007339 switch (val) {
7340 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007341 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7342 ret = -EINVAL;
7343 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007344 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04007345 if (tr->allocated_snapshot)
7346 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007347 break;
7348 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007349/* Only allow per-cpu swap if the ring buffer supports it */
7350#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7351 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7352 ret = -EINVAL;
7353 break;
7354 }
7355#endif
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09007356 if (tr->allocated_snapshot)
7357 ret = resize_buffer_duplicate_size(&tr->max_buffer,
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007358 &tr->array_buffer, iter->cpu_file);
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09007359 else
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04007360 ret = tracing_alloc_snapshot_instance(tr);
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09007361 if (ret < 0)
7362 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007363 local_irq_disable();
7364 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007365 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Tom Zanussia35873a2019-02-13 17:42:45 -06007366 update_max_tr(tr, current, smp_processor_id(), NULL);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007367 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05007368 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007369 local_irq_enable();
7370 break;
7371 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05007372 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007373 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7374 tracing_reset_online_cpus(&tr->max_buffer);
7375 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04007376 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007377 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007378 break;
7379 }
7380
7381 if (ret >= 0) {
7382 *ppos += cnt;
7383 ret = cnt;
7384 }
7385out:
7386 mutex_unlock(&trace_types_lock);
7387 return ret;
7388}
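/*
 * Illustrative sketch (not part of the original file): the switch above gives
 * the snapshot file its echo semantics. Assumes tracefs is mounted at
 * /sys/kernel/tracing and the usual <fcntl.h>/<unistd.h> user-space headers.
 */
static void drive_snapshot_file(void)
{
	int fd = open("/sys/kernel/tracing/snapshot", O_WRONLY);

	if (fd < 0)
		return;
	write(fd, "1", 1);	/* allocate if needed, then swap in a snapshot */
	write(fd, "2", 1);	/* any other value: clear the snapshot buffer */
	write(fd, "0", 1);	/* free the snapshot buffer */
	close(fd);
}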
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007389
7390static int tracing_snapshot_release(struct inode *inode, struct file *file)
7391{
7392 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007393 int ret;
7394
7395 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007396
7397 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007398 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007399
7400 /* If write only, the seq_file is just a stub */
7401 if (m)
7402 kfree(m->private);
7403 kfree(m);
7404
7405 return 0;
7406}
7407
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007408static int tracing_buffers_open(struct inode *inode, struct file *filp);
7409static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7410 size_t count, loff_t *ppos);
7411static int tracing_buffers_release(struct inode *inode, struct file *file);
7412static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7413 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7414
7415static int snapshot_raw_open(struct inode *inode, struct file *filp)
7416{
7417 struct ftrace_buffer_info *info;
7418 int ret;
7419
Steven Rostedt (VMware)17911ff2019-10-11 17:22:50 -04007420 /* The following checks for tracefs lockdown */
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007421 ret = tracing_buffers_open(inode, filp);
7422 if (ret < 0)
7423 return ret;
7424
7425 info = filp->private_data;
7426
7427 if (info->iter.trace->use_max_tr) {
7428 tracing_buffers_release(inode, filp);
7429 return -EBUSY;
7430 }
7431
7432 info->iter.snapshot = true;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007433 info->iter.array_buffer = &info->iter.tr->max_buffer;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007434
7435 return ret;
7436}
7437
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007438#endif /* CONFIG_TRACER_SNAPSHOT */
7439
7440
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04007441static const struct file_operations tracing_thresh_fops = {
7442 .open = tracing_open_generic,
7443 .read = tracing_thresh_read,
7444 .write = tracing_thresh_write,
7445 .llseek = generic_file_llseek,
7446};
7447
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04007448#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007449static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007450 .open = tracing_open_generic,
7451 .read = tracing_max_lat_read,
7452 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007453 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007454};
Chen Gange428abb2015-11-10 05:15:15 +08007455#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007456
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007457static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007458 .open = tracing_open_generic,
7459 .read = tracing_set_trace_read,
7460 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007461 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007462};
7463
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007464static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007465 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02007466 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007467 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02007468 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007469 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007470 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02007471};
7472
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007473static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007474 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02007475 .read = tracing_entries_read,
7476 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007477 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007478 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02007479};
7480
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007481static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007482 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007483 .read = tracing_total_entries_read,
7484 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007485 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007486};
7487
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007488static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007489 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007490 .write = tracing_free_buffer_write,
7491 .release = tracing_free_buffer_release,
7492};
7493
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007494static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007495 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007496 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007497 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007498 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007499};
7500
Steven Rostedtfa32e852016-07-06 15:25:08 -04007501static const struct file_operations tracing_mark_raw_fops = {
7502 .open = tracing_open_generic_tr,
7503 .write = tracing_mark_raw_write,
7504 .llseek = generic_file_llseek,
7505 .release = tracing_release_generic_tr,
7506};
7507
Zhaolei5079f322009-08-25 16:12:56 +08007508static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08007509 .open = tracing_clock_open,
7510 .read = seq_read,
7511 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007512 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08007513 .write = tracing_clock_write,
7514};
7515
Tom Zanussi2c1ea602018-01-15 20:51:41 -06007516static const struct file_operations trace_time_stamp_mode_fops = {
7517 .open = tracing_time_stamp_mode_open,
7518 .read = seq_read,
7519 .llseek = seq_lseek,
7520 .release = tracing_single_release_tr,
7521};
7522
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007523#ifdef CONFIG_TRACER_SNAPSHOT
7524static const struct file_operations snapshot_fops = {
7525 .open = tracing_snapshot_open,
7526 .read = seq_read,
7527 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05007528 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007529 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007530};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007531
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007532static const struct file_operations snapshot_raw_fops = {
7533 .open = snapshot_raw_open,
7534 .read = tracing_buffers_read,
7535 .release = tracing_buffers_release,
7536 .splice_read = tracing_buffers_splice_read,
7537 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007538};
7539
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007540#endif /* CONFIG_TRACER_SNAPSHOT */
7541
Tom Zanussi8a062902019-03-31 18:48:15 -05007542#define TRACING_LOG_ERRS_MAX 8
7543#define TRACING_LOG_LOC_MAX 128
7544
7545#define CMD_PREFIX " Command: "
7546
7547struct err_info {
7548 const char **errs; /* ptr to loc-specific array of err strings */
7549 u8 type; /* index into errs -> specific err string */
7550 u8 pos; /* caret position within cmd (cmd is at most MAX_FILTER_STR_VAL = 256) */
7551 u64 ts;
7552};
7553
7554struct tracing_log_err {
7555 struct list_head list;
7556 struct err_info info;
7557 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7558 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7559};
7560
Tom Zanussi8a062902019-03-31 18:48:15 -05007561static DEFINE_MUTEX(tracing_err_log_lock);
7562
YueHaibingff585c52019-06-14 23:32:10 +08007563static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05007564{
7565 struct tracing_log_err *err;
7566
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007567 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007568 err = kzalloc(sizeof(*err), GFP_KERNEL);
7569 if (!err)
7570 err = ERR_PTR(-ENOMEM);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007571 tr->n_err_log_entries++;
Tom Zanussi8a062902019-03-31 18:48:15 -05007572
7573 return err;
7574 }
7575
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007576 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
Tom Zanussi8a062902019-03-31 18:48:15 -05007577 list_del(&err->list);
7578
7579 return err;
7580}
7581
7582/**
7583 * err_pos - find the position of a string within a command for error careting
7584 * @cmd: The tracing command that caused the error
7585 * @str: The string to position the caret at within @cmd
7586 *
Ingo Molnarf2cc0202021-03-23 18:49:35 +01007587 * Finds the position of the first occurrence of @str within @cmd. The
Tom Zanussi8a062902019-03-31 18:48:15 -05007588 * return value can be passed to tracing_log_err() for caret placement
7589 * within @cmd.
7590 *
Ingo Molnarf2cc0202021-03-23 18:49:35 +01007591 * Returns the index within @cmd of the first occurrence of @str or 0
Tom Zanussi8a062902019-03-31 18:48:15 -05007592 * if @str was not found.
7593 */
7594unsigned int err_pos(char *cmd, const char *str)
7595{
7596 char *found;
7597
7598 if (WARN_ON(!strlen(cmd)))
7599 return 0;
7600
7601 found = strstr(cmd, str);
7602 if (found)
7603 return found - cmd;
7604
7605 return 0;
7606}
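/*
 * Illustrative sketch (not part of the original file): a minimal err_pos()
 * use; the command string is hypothetical.
 */
static void err_pos_example(void)
{
	char cmd[] = "hist:keys=common_pid:vals=runtime";

	/* "vals" first occurs at index 21; 0 would mean "not found" */
	pr_info("caret position: %u\n", err_pos(cmd, "vals"));
}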
7607
7608/**
7609 * tracing_log_err - write an error to the tracing error log
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007610 * @tr: The associated trace array for the error (NULL for top level array)
Tom Zanussi8a062902019-03-31 18:48:15 -05007611 * @loc: A string describing where the error occurred
7612 * @cmd: The tracing command that caused the error
7613 * @errs: The array of loc-specific static error strings
7614 * @type: The index into errs[], which produces the specific static err string
7615 * @pos: The position the caret should be placed in the cmd
7616 *
7617 * Writes an error into tracing/error_log of the form:
7618 *
7619 * <loc>: error: <text>
7620 * Command: <cmd>
7621 * ^
7622 *
7623 * tracing/error_log is a small log file containing the last
7624 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7625 * unless there has been a tracing error, and the error log can be
7626 * cleared and have its memory freed by writing the empty string in
7627 * truncation mode to it, i.e. echo > tracing/error_log.
7628 *
7629 * NOTE: the @errs array along with the @type param are used to
7630 * produce a static error string - this string is not copied and saved
7631 * when the error is logged - only a pointer to it is saved. See
7632 * existing callers for examples of how static strings are typically
7633 * defined for use with tracing_log_err().
7634 */
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007635void tracing_log_err(struct trace_array *tr,
7636 const char *loc, const char *cmd,
Tom Zanussi8a062902019-03-31 18:48:15 -05007637 const char **errs, u8 type, u8 pos)
7638{
7639 struct tracing_log_err *err;
7640
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007641 if (!tr)
7642 tr = &global_trace;
7643
Tom Zanussi8a062902019-03-31 18:48:15 -05007644 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007645 err = get_tracing_log_err(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007646 if (PTR_ERR(err) == -ENOMEM) {
7647 mutex_unlock(&tracing_err_log_lock);
7648 return;
7649 }
7650
7651 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7652 snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7653
7654 err->info.errs = errs;
7655 err->info.type = type;
7656 err->info.pos = pos;
7657 err->info.ts = local_clock();
7658
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007659 list_add_tail(&err->list, &tr->err_log);
Tom Zanussi8a062902019-03-31 18:48:15 -05007660 mutex_unlock(&tracing_err_log_lock);
7661}
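/*
 * Illustrative sketch (not part of the original file): the shape of a typical
 * tracing_log_err() call site. The error table, location string and command
 * are hypothetical; real callers (e.g. the hist trigger code) define their
 * own static string tables.
 */
static const char *sketch_errs[] = {
	"Duplicate keyword",	/* type 0 */
	"Unknown field name",	/* type 1 */
};

static void report_sketch_error(struct trace_array *tr, char *cmd)
{
	/* logs "...: error: Unknown field name" plus the caret'ed command */
	tracing_log_err(tr, "hist:sched:sched_switch", cmd, sketch_errs,
			1, err_pos(cmd, "bogus_field"));
}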
7662
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007663static void clear_tracing_err_log(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05007664{
7665 struct tracing_log_err *err, *next;
7666
7667 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007668 list_for_each_entry_safe(err, next, &tr->err_log, list) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007669 list_del(&err->list);
7670 kfree(err);
7671 }
7672
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007673 tr->n_err_log_entries = 0;
Tom Zanussi8a062902019-03-31 18:48:15 -05007674 mutex_unlock(&tracing_err_log_lock);
7675}
7676
7677static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7678{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007679 struct trace_array *tr = m->private;
7680
Tom Zanussi8a062902019-03-31 18:48:15 -05007681 mutex_lock(&tracing_err_log_lock);
7682
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007683 return seq_list_start(&tr->err_log, *pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007684}
7685
7686static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7687{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007688 struct trace_array *tr = m->private;
7689
7690 return seq_list_next(v, &tr->err_log, pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007691}
7692
7693static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7694{
7695 mutex_unlock(&tracing_err_log_lock);
7696}
7697
7698static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7699{
7700 u8 i;
7701
7702 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7703 seq_putc(m, ' ');
7704 for (i = 0; i < pos; i++)
7705 seq_putc(m, ' ');
7706 seq_puts(m, "^\n");
7707}
7708
7709static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7710{
7711 struct tracing_log_err *err = v;
7712
7713 if (err) {
7714 const char *err_text = err->info.errs[err->info.type];
7715 u64 sec = err->info.ts;
7716 u32 nsec;
7717
7718 nsec = do_div(sec, NSEC_PER_SEC);
7719 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7720 err->loc, err_text);
7721 seq_printf(m, "%s", err->cmd);
7722 tracing_err_log_show_pos(m, err->info.pos);
7723 }
7724
7725 return 0;
7726}
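/*
 * Taken together, tracing_err_log_seq_show() and tracing_err_log_show_pos()
 * render each error_log entry roughly as below (illustrative values); the
 * caret sits sizeof(CMD_PREFIX) - 1 + pos columns in, under the offending
 * part of the command:
 *
 * [  136.708678] hist: error: Duplicate keyword
 *  Command: hist:keys=pid:keys=pid
 *                ^
 */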
7727
7728static const struct seq_operations tracing_err_log_seq_ops = {
7729 .start = tracing_err_log_seq_start,
7730 .next = tracing_err_log_seq_next,
7731 .stop = tracing_err_log_seq_stop,
7732 .show = tracing_err_log_seq_show
7733};
7734
7735static int tracing_err_log_open(struct inode *inode, struct file *file)
7736{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007737 struct trace_array *tr = inode->i_private;
Tom Zanussi8a062902019-03-31 18:48:15 -05007738 int ret = 0;
7739
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007740 ret = tracing_check_open_get_tr(tr);
7741 if (ret)
7742 return ret;
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007743
Tom Zanussi8a062902019-03-31 18:48:15 -05007744 /* If this file was opened for write, then erase contents */
7745 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007746 clear_tracing_err_log(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007747
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007748 if (file->f_mode & FMODE_READ) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007749 ret = seq_open(file, &tracing_err_log_seq_ops);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007750 if (!ret) {
7751 struct seq_file *m = file->private_data;
7752 m->private = tr;
7753 } else {
7754 trace_array_put(tr);
7755 }
7756 }
Tom Zanussi8a062902019-03-31 18:48:15 -05007757 return ret;
7758}
7759
7760static ssize_t tracing_err_log_write(struct file *file,
7761 const char __user *buffer,
7762 size_t count, loff_t *ppos)
7763{
7764 return count;
7765}
7766
Takeshi Misawad122ed62019-06-28 19:56:40 +09007767static int tracing_err_log_release(struct inode *inode, struct file *file)
7768{
7769 struct trace_array *tr = inode->i_private;
7770
7771 trace_array_put(tr);
7772
7773 if (file->f_mode & FMODE_READ)
7774 seq_release(inode, file);
7775
7776 return 0;
7777}
7778
Tom Zanussi8a062902019-03-31 18:48:15 -05007779static const struct file_operations tracing_err_log_fops = {
7780 .open = tracing_err_log_open,
7781 .write = tracing_err_log_write,
7782 .read = seq_read,
7783 .llseek = seq_lseek,
Takeshi Misawad122ed62019-06-28 19:56:40 +09007784 .release = tracing_err_log_release,
Tom Zanussi8a062902019-03-31 18:48:15 -05007785};
7786
Steven Rostedt2cadf912008-12-01 22:20:19 -05007787static int tracing_buffers_open(struct inode *inode, struct file *filp)
7788{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007789 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007790 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007791 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007792
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007793 ret = tracing_check_open_get_tr(tr);
7794 if (ret)
7795 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007796
Zhaoyang Huang0f69dae2020-07-31 08:27:45 +08007797 info = kvzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007798 if (!info) {
7799 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007800 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007801 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007802
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007803 mutex_lock(&trace_types_lock);
7804
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007805 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007806 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05007807 info->iter.trace = tr->current_trace;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007808 info->iter.array_buffer = &tr->array_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007809 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007810 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007811 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007812
7813 filp->private_data = info;
7814
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04007815 tr->trace_ref++;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007816
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007817 mutex_unlock(&trace_types_lock);
7818
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007819 ret = nonseekable_open(inode, filp);
7820 if (ret < 0)
7821 trace_array_put(tr);
7822
7823 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007824}
7825
Al Viro9dd95742017-07-03 00:42:43 -04007826static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007827tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7828{
7829 struct ftrace_buffer_info *info = filp->private_data;
7830 struct trace_iterator *iter = &info->iter;
7831
7832 return trace_poll(iter, filp, poll_table);
7833}
7834
Steven Rostedt2cadf912008-12-01 22:20:19 -05007835static ssize_t
7836tracing_buffers_read(struct file *filp, char __user *ubuf,
7837 size_t count, loff_t *ppos)
7838{
7839 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007840 struct trace_iterator *iter = &info->iter;
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007841 ssize_t ret = 0;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007842 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007843
Steven Rostedt2dc5d122009-03-04 19:10:05 -05007844 if (!count)
7845 return 0;
7846
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007847#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007848 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7849 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007850#endif
7851
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007852 if (!info->spare) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007853 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007854 iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007855 if (IS_ERR(info->spare)) {
7856 ret = PTR_ERR(info->spare);
7857 info->spare = NULL;
7858 } else {
7859 info->spare_cpu = iter->cpu_file;
7860 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007861 }
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007862 if (!info->spare)
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04007863 return ret;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007864
Steven Rostedt2cadf912008-12-01 22:20:19 -05007865 /* Do we have previous read data to read? */
7866 if (info->read < PAGE_SIZE)
7867 goto read;
7868
Steven Rostedtb6273442013-02-28 13:44:11 -05007869 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007870 trace_access_lock(iter->cpu_file);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007871 ret = ring_buffer_read_page(iter->array_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007872 &info->spare,
7873 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007874 iter->cpu_file, 0);
7875 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05007876
7877 if (ret < 0) {
7878 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007879 if ((filp->f_flags & O_NONBLOCK))
7880 return -EAGAIN;
7881
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05007882 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007883 if (ret)
7884 return ret;
7885
Steven Rostedtb6273442013-02-28 13:44:11 -05007886 goto again;
7887 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007888 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05007889 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007890
Steven Rostedt436fc282011-10-14 10:44:25 -04007891 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05007892 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05007893 size = PAGE_SIZE - info->read;
7894 if (size > count)
7895 size = count;
7896
7897 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05007898 if (ret == size)
7899 return -EFAULT;
7900
Steven Rostedt2dc5d122009-03-04 19:10:05 -05007901 size -= ret;
7902
Steven Rostedt2cadf912008-12-01 22:20:19 -05007903 *ppos += size;
7904 info->read += size;
7905
7906 return size;
7907}
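/*
 * Illustrative sketch (not part of the original file): tracing_buffers_read()
 * backs per_cpu/cpuN/trace_pipe_raw, which hands complete ring buffer pages
 * (binary data, not text) to user space. Assumes tracefs is mounted at
 * /sys/kernel/tracing and the usual <fcntl.h>/<unistd.h> user-space headers.
 */
static void read_raw_pages(void)
{
	char page[4096];	/* one ring buffer page (PAGE_SIZE) */
	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return;
	/* with O_NONBLOCK, an empty buffer returns -1/EAGAIN instead of waiting */
	while (read(fd, page, sizeof(page)) > 0)
		;
	close(fd);
}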
7908
7909static int tracing_buffers_release(struct inode *inode, struct file *file)
7910{
7911 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007912 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007913
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007914 mutex_lock(&trace_types_lock);
7915
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04007916 iter->tr->trace_ref--;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007917
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007918 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007919
Lai Jiangshanddd538f2009-04-02 15:16:59 +08007920 if (info->spare)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007921 ring_buffer_free_read_page(iter->array_buffer->buffer,
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007922 info->spare_cpu, info->spare);
Zhaoyang Huang0f69dae2020-07-31 08:27:45 +08007923 kvfree(info);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007924
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007925 mutex_unlock(&trace_types_lock);
7926
Steven Rostedt2cadf912008-12-01 22:20:19 -05007927 return 0;
7928}
7929
7930struct buffer_ref {
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05007931 struct trace_buffer *buffer;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007932 void *page;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007933 int cpu;
Jann Hornb9872222019-04-04 23:59:25 +02007934 refcount_t refcount;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007935};
7936
Jann Hornb9872222019-04-04 23:59:25 +02007937static void buffer_ref_release(struct buffer_ref *ref)
7938{
7939 if (!refcount_dec_and_test(&ref->refcount))
7940 return;
7941 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7942 kfree(ref);
7943}
7944
Steven Rostedt2cadf912008-12-01 22:20:19 -05007945static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7946 struct pipe_buffer *buf)
7947{
7948 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7949
Jann Hornb9872222019-04-04 23:59:25 +02007950 buffer_ref_release(ref);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007951 buf->private = 0;
7952}
7953
Matthew Wilcox15fab632019-04-05 14:02:10 -07007954static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007955 struct pipe_buffer *buf)
7956{
7957 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7958
Linus Torvaldse9e1a2e2019-04-26 11:09:55 -07007959 if (refcount_read(&ref->refcount) > INT_MAX/2)
Matthew Wilcox15fab632019-04-05 14:02:10 -07007960 return false;
7961
Jann Hornb9872222019-04-04 23:59:25 +02007962 refcount_inc(&ref->refcount);
Matthew Wilcox15fab632019-04-05 14:02:10 -07007963 return true;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007964}
7965
7966/* Pipe buffer operations for a ring buffer page. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08007967static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05007968 .release = buffer_pipe_buf_release,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007969 .get = buffer_pipe_buf_get,
7970};
7971
7972/*
7973 * Callback from splice_to_pipe(): releases the pages left in the spd
7974 * when we errored out while filling the pipe.
7975 */
7976static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7977{
7978 struct buffer_ref *ref =
7979 (struct buffer_ref *)spd->partial[i].private;
7980
Jann Hornb9872222019-04-04 23:59:25 +02007981 buffer_ref_release(ref);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007982 spd->partial[i].private = 0;
7983}
7984
7985static ssize_t
7986tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7987 struct pipe_inode_info *pipe, size_t len,
7988 unsigned int flags)
7989{
7990 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007991 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02007992 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7993 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05007994 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02007995 .pages = pages_def,
7996 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02007997 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007998 .ops = &buffer_pipe_buf_ops,
7999 .spd_release = buffer_spd_release,
8000 };
8001 struct buffer_ref *ref;
Steven Rostedt (VMware)6b7e6332017-12-22 20:38:57 -05008002 int entries, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01008003 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008004
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008005#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008006 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8007 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008008#endif
8009
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008010 if (*ppos & (PAGE_SIZE - 1))
8011 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08008012
8013 if (len & (PAGE_SIZE - 1)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008014 if (len < PAGE_SIZE)
8015 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08008016 len &= PAGE_MASK;
8017 }
8018
Al Viro1ae22932016-09-17 18:31:46 -04008019 if (splice_grow_spd(pipe, &spd))
8020 return -ENOMEM;
8021
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008022 again:
8023 trace_access_lock(iter->cpu_file);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008024 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04008025
Al Viroa786c062014-04-11 12:01:03 -04008026 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05008027 struct page *page;
8028 int r;
8029
8030 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01008031 if (!ref) {
8032 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008033 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01008034 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05008035
Jann Hornb9872222019-04-04 23:59:25 +02008036 refcount_set(&ref->refcount, 1);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008037 ref->buffer = iter->array_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008038 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04008039 if (IS_ERR(ref->page)) {
8040 ret = PTR_ERR(ref->page);
8041 ref->page = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008042 kfree(ref);
8043 break;
8044 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04008045 ref->cpu = iter->cpu_file;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008046
8047 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008048 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05008049 if (r < 0) {
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04008050 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8051 ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05008052 kfree(ref);
8053 break;
8054 }
8055
Steven Rostedt2cadf912008-12-01 22:20:19 -05008056 page = virt_to_page(ref->page);
8057
8058 spd.pages[i] = page;
8059 spd.partial[i].len = PAGE_SIZE;
8060 spd.partial[i].offset = 0;
8061 spd.partial[i].private = (unsigned long)ref;
8062 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08008063 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04008064
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008065 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05008066 }
8067
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008068 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05008069 spd.nr_pages = i;
8070
8071 /* did we read anything? */
8072 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01008073 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04008074 goto out;
Rabin Vincent07906da2014-11-06 22:26:07 +01008075
Al Viro1ae22932016-09-17 18:31:46 -04008076 ret = -EAGAIN;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008077 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
Al Viro1ae22932016-09-17 18:31:46 -04008078 goto out;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008079
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05008080 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04008081 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04008082 goto out;
Rabin Vincente30f53a2014-11-10 19:46:34 +01008083
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008084 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008085 }
8086
8087 ret = splice_to_pipe(pipe, &spd);
Al Viro1ae22932016-09-17 18:31:46 -04008088out:
Eric Dumazet047fe362012-06-12 15:24:40 +02008089 splice_shrink_spd(&spd);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008090
Steven Rostedt2cadf912008-12-01 22:20:19 -05008091 return ret;
8092}
8093
8094static const struct file_operations tracing_buffers_fops = {
8095 .open = tracing_buffers_open,
8096 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008097 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05008098 .release = tracing_buffers_release,
8099 .splice_read = tracing_buffers_splice_read,
8100 .llseek = no_llseek,
8101};
8102
Steven Rostedtc8d77182009-04-29 18:03:45 -04008103static ssize_t
8104tracing_stats_read(struct file *filp, char __user *ubuf,
8105 size_t count, loff_t *ppos)
8106{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02008107 struct inode *inode = file_inode(filp);
8108 struct trace_array *tr = inode->i_private;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008109 struct array_buffer *trace_buf = &tr->array_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02008110 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008111 struct trace_seq *s;
8112 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07008113 unsigned long long t;
8114 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04008115
Li Zefane4f2d102009-06-15 10:57:28 +08008116 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008117 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01008118 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04008119
8120 trace_seq_init(s);
8121
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008122 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008123 trace_seq_printf(s, "entries: %ld\n", cnt);
8124
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008125 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008126 trace_seq_printf(s, "overrun: %ld\n", cnt);
8127
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008128 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008129 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8130
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008131 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07008132 trace_seq_printf(s, "bytes: %ld\n", cnt);
8133
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09008134 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08008135 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008136 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08008137 usec_rem = do_div(t, USEC_PER_SEC);
8138 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8139 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07008140
Yordan Karadzhov (VMware)f3ef7202021-03-29 16:03:31 +03008141 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08008142 usec_rem = do_div(t, USEC_PER_SEC);
8143 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8144 } else {
8145 /* counter or tsc mode for trace_clock */
8146 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008147 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08008148
8149 trace_seq_printf(s, "now ts: %llu\n",
Yordan Karadzhov (VMware)f3ef7202021-03-29 16:03:31 +03008150 ring_buffer_time_stamp(trace_buf->buffer));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08008151 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07008152
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008153 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07008154 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8155
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008156 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05008157 trace_seq_printf(s, "read events: %ld\n", cnt);
8158
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05008159 count = simple_read_from_buffer(ubuf, count, ppos,
8160 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04008161
8162 kfree(s);
8163
8164 return count;
8165}
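/*
 * The trace_seq calls above give per_cpu/cpuN/stats output of this shape
 * (illustrative values, nanosecond-based clock):
 *
 * entries: 205
 * overrun: 0
 * commit overrun: 0
 * bytes: 12450
 * oldest event ts:  7291.084251
 * now ts:  7291.089631
 * dropped events: 0
 * read events: 205
 */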
8166
8167static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02008168 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04008169 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008170 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02008171 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04008172};
8173
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008174#ifdef CONFIG_DYNAMIC_FTRACE
8175
8176static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04008177tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008178 size_t cnt, loff_t *ppos)
8179{
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04008180 ssize_t ret;
8181 char *buf;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008182 int r;
8183
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04008184 /* 256 should be plenty to hold the amount needed */
8185 buf = kmalloc(256, GFP_KERNEL);
8186 if (!buf)
8187 return -ENOMEM;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04008188
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04008189 r = scnprintf(buf, 256, "%ld pages: %ld groups: %ld\n",
8190 ftrace_update_tot_cnt,
8191 ftrace_number_of_pages,
8192 ftrace_number_of_groups);
8193
8194 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8195 kfree(buf);
8196 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008197}
8198
Steven Rostedt5e2336a2009-03-05 21:44:55 -05008199static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02008200 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04008201 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008202 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008203};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008204#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008205
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008206#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8207static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04008208ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04008209 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008210 void *data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008211{
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04008212 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008213}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008214
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008215static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04008216ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04008217 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008218 void *data)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008219{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008220 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008221 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008222
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008223 if (mapper)
8224 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008225
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008226 if (count) {
8227
8228 if (*count <= 0)
8229 return;
8230
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008231 (*count)--;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008232 }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008233
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04008234 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008235}
8236
8237static int
8238ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8239 struct ftrace_probe_ops *ops, void *data)
8240{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008241 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008242 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008243
8244 seq_printf(m, "%ps:", (void *)ip);
8245
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01008246 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008247
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008248 if (mapper)
8249 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8250
8251 if (count)
8252 seq_printf(m, ":count=%ld\n", *count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008253 else
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008254 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008255
8256 return 0;
8257}
8258
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008259static int
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04008260ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008261 unsigned long ip, void *init_data, void **data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008262{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008263 struct ftrace_func_mapper *mapper = *data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008264
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008265 if (!mapper) {
8266 mapper = allocate_ftrace_func_mapper();
8267 if (!mapper)
8268 return -ENOMEM;
8269 *data = mapper;
8270 }
8271
8272 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008273}
8274
8275static void
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04008276ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008277 unsigned long ip, void *data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008278{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008279 struct ftrace_func_mapper *mapper = data;
8280
8281 if (!ip) {
8282 if (!mapper)
8283 return;
8284 free_ftrace_func_mapper(mapper, NULL);
8285 return;
8286 }
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008287
8288 ftrace_func_mapper_remove_ip(mapper, ip);
8289}
8290
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008291static struct ftrace_probe_ops snapshot_probe_ops = {
8292 .func = ftrace_snapshot,
8293 .print = ftrace_snapshot_print,
8294};
8295
8296static struct ftrace_probe_ops snapshot_count_probe_ops = {
8297 .func = ftrace_count_snapshot,
8298 .print = ftrace_snapshot_print,
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008299 .init = ftrace_snapshot_init,
8300 .free = ftrace_snapshot_free,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008301};
8302
8303static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04008304ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008305 char *glob, char *cmd, char *param, int enable)
8306{
8307 struct ftrace_probe_ops *ops;
8308 void *count = (void *)-1;
8309 char *number;
8310 int ret;
8311
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -04008312 if (!tr)
8313 return -ENODEV;
8314
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008315 /* hash funcs only work with set_ftrace_filter */
8316 if (!enable)
8317 return -EINVAL;
8318
8319 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8320
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04008321 if (glob[0] == '!')
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04008322 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008323
8324 if (!param)
8325 goto out_reg;
8326
8327 number = strsep(&param, ":");
8328
8329 if (!strlen(number))
8330 goto out_reg;
8331
8332 /*
8333 * We use the callback data field (which is a pointer)
8334 * as our counter.
8335 */
8336 ret = kstrtoul(number, 0, (unsigned long *)&count);
8337 if (ret)
8338 return ret;
8339
8340 out_reg:
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04008341 ret = tracing_alloc_snapshot_instance(tr);
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04008342 if (ret < 0)
8343 goto out;
8344
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04008345 ret = register_ftrace_function_probe(glob, tr, ops, count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008346
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04008347 out:
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008348 return ret < 0 ? ret : 0;
8349}
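/*
 * Illustrative sketch (not part of the original file): the callback above
 * implements the "snapshot" command of set_ftrace_filter (it is rejected for
 * set_ftrace_notrace by the !enable check). Assumes tracefs is mounted at
 * /sys/kernel/tracing and the usual <fcntl.h>/<unistd.h> user-space headers.
 */
static void arm_snapshot_probe(void)
{
	static const char cmd[] = "schedule:snapshot:1";
	int fd = open("/sys/kernel/tracing/set_ftrace_filter", O_WRONLY);

	if (fd >= 0) {
		/* take one snapshot the first time schedule() is traced */
		write(fd, cmd, sizeof(cmd) - 1);
		close(fd);
	}
}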
8350
8351static struct ftrace_func_command ftrace_snapshot_cmd = {
8352 .name = "snapshot",
8353 .func = ftrace_trace_snapshot_callback,
8354};
8355
Tom Zanussi38de93a2013-10-24 08:34:18 -05008356static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008357{
8358 return register_ftrace_command(&ftrace_snapshot_cmd);
8359}
8360#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05008361static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008362#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
	if (WARN_ON(!tr->dir))
		return ERR_PTR(-ENODEV);

	/* Top directory uses NULL as the parent */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return NULL;

	/* All sub buffers have a descriptor */
	return tr->dir;
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);

	MEM_FAIL(!tr->percpu_dir,
		 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		d_inode(ret)->i_cdev = (void *)(cpu + 1);
	return ret;
}

static void
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
			      tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
			      tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
			      tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
			      tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
			      tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
			      tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
			      tr, cpu, &snapshot_raw_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

/*
 * In order to pass in both the trace_array descriptor as well as the index
 * to the flag that the trace option file represents, the trace_array
 * has a character array of trace_flags_index[], which holds the index
 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 * The address of this character array is passed to the flag option file
 * read/write callbacks.
 *
 * To extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm.
 *
 *   idx = *ptr;
 *
 * This works because the pointer passed in is the address of one index
 * byte, and each byte's value equals its position in the array
 * (remember, index[1] == 1).
 *
 * Then, subtracting that index from the pointer lands on the start of
 * the array itself:
 *
 *   ptr - idx == &index[0]
 *
 * A simple container_of() from that pointer then gets us to the
 * trace_array descriptor.
 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;

	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}
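
/*
 * Worked example (illustrative): the option file for flag bit 3 of the
 * global trace array has private_data == &global_trace.trace_flags_index[3],
 * and that byte holds the value 3. get_tr_index() reads the 3, steps back
 * three bytes to &trace_flags_index[0], and container_of() turns that
 * array address into the enclosing trace_array.
 */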

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	char *buf;

	get_tr_index(tr_index, &tr, &index);

	if (tr->trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warn("Could not create tracefs '%s' entry\n", name);

	return ret;
}

static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warn("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/* Make sure there are no duplicate flags. */
		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
			return;
	}

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		MEM_FAIL(topts[cnt].entry == NULL,
			 "Failed to create trace option: %s",
			 opts[cnt].name);
	}
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options,
				 (void *)&tr->trace_flags_index[index],
				 &trace_options_core_fops);
}

static void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	bool top_level = tr == &global_trace;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		if (top_level ||
		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
			create_trace_option_core_file(tr, trace_options[i], i);
	}
}

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (!!val == tracer_tracing_is_on(tr)) {
			val = 0; /* do nothing */
		} else if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
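
/*
 * rb_simple_fops backs the per-instance "tracing_on" file. From user
 * space (illustrative):
 *
 *   echo 0 > /sys/kernel/tracing/tracing_on    # stop recording
 *   echo 1 > /sys/kernel/tracing/tracing_on    # resume recording
 *
 * Writing the value that is already set is a no-op, which is why
 * rb_simple_write() compares against tracer_tracing_is_on() first.
 */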

static ssize_t
buffer_percent_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tr->buffer_percent;
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
buffer_percent_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val > 100)
		return -EINVAL;

	if (!val)
		val = 1;

	tr->buffer_percent = val;

	(*ppos)++;

	return cnt;
}

static const struct file_operations buffer_percent_fops = {
	.open		= tracing_open_generic_tr,
	.read		= buffer_percent_read,
	.write		= buffer_percent_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
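
/*
 * "buffer_percent" is the watermark for how full the ring buffer must be
 * before a blocked reader is woken. A write of 0 is stored as 1, so the
 * effective range here is 1..100; e.g. (illustrative):
 *
 *   echo 50 > /sys/kernel/tracing/buffer_percent
 *
 * wakes waiting readers once half of the buffer has filled.
 */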

static struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->array_buffer,
			   ring_buffer_size(tr->array_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
		ring_buffer_free(tr->array_buffer.buffer);
		tr->array_buffer.buffer = NULL;
		free_percpu(tr->array_buffer.data);
		tr->array_buffer.data = NULL;
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif

	return 0;
}
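
/*
 * Note: without "alloc_snapshot" on the kernel command line, the
 * max/snapshot buffer above is created with a token size of one page and
 * is only expanded to mirror the main buffer when a snapshot feature is
 * first used (see tracing_alloc_snapshot_instance() earlier in this file).
 */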

static void free_trace_buffer(struct array_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->array_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}

static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}

/* Must have trace_types_lock held */
struct trace_array *trace_array_find(const char *instance)
{
	struct trace_array *tr, *found = NULL;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, instance) == 0) {
			found = tr;
			break;
		}
	}

	return found;
}

struct trace_array *trace_array_find_get(const char *instance)
{
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);
	tr = trace_array_find(instance);
	if (tr)
		tr->ref++;
	mutex_unlock(&trace_types_lock);

	return tr;
}

static int trace_array_create_dir(struct trace_array *tr)
{
	int ret;

	tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
	if (!tr->dir)
		return -EINVAL;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret)
		tracefs_remove(tr->dir);

	init_tracer_tracefs(tr, tr->dir);
	__update_tracer_options(tr);

	return ret;
}

static struct trace_array *trace_array_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		return ERR_PTR(ret);

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);
	INIT_LIST_HEAD(&tr->hist_vars);
	INIT_LIST_HEAD(&tr->err_log);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	if (ftrace_allocate_ftrace_ops(tr) < 0)
		goto out_free_tr;

	ftrace_init_trace_array(tr);

	init_trace_flags_index(tr);

	if (trace_instance_dir) {
		ret = trace_array_create_dir(tr);
		if (ret)
			goto out_free_tr;
	} else
		__trace_early_add_events(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	tr->ref++;

	return tr;

 out_free_tr:
	ftrace_free_ftrace_ops(tr);
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

	return ERR_PTR(ret);
}

static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	if (trace_array_find(name))
		goto out_unlock;

	tr = trace_array_create(name);

	ret = PTR_ERR_OR_ZERO(tr);

out_unlock:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);
	return ret;
}

/**
 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
 * @name: The name of the trace array to be looked up/created.
 *
 * Returns a pointer to the trace array with the given name, or NULL if
 * it cannot be created.
 *
 * NOTE: This function increments the reference counter associated with the
 * trace array returned. This makes sure it cannot be freed while in use.
 * Use trace_array_put() once the trace array is no longer needed.
 * If the trace_array is to be freed, trace_array_destroy() needs to
 * be called after the trace_array_put(), or simply let user space delete
 * it from the tracefs instances directory. But until trace_array_put()
 * is called, user space cannot delete it.
 */
struct trace_array *trace_array_get_by_name(const char *name)
{
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	tr = trace_array_create(name);

	if (IS_ERR(tr))
		tr = NULL;
out_unlock:
	if (tr)
		tr->ref++;

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);
	return tr;
}
EXPORT_SYMBOL_GPL(trace_array_get_by_name);
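
/*
 * Sketch of intended module usage (hypothetical code, following the
 * kerneldoc above):
 *
 *   struct trace_array *tr = trace_array_get_by_name("my_instance");
 *
 *   if (tr) {
 *           ...
 *           trace_array_put(tr);      // drop the reference taken above
 *           trace_array_destroy(tr);  // only if the instance should go away
 *   }
 */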

static int __remove_instance(struct trace_array *tr)
{
	int i;

	/* Reference counter for a newly created trace array = 1. */
	if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
		return -EBUSY;

	list_del(&tr->list);

	/* Disable all the flags that were enabled coming in */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
		if ((1 << i) & ZEROED_TRACE_FLAGS)
			set_tracer_flag(tr, 1 << i, 0);
	}

	tracing_set_nop(tr);
	clear_ftrace_function_probes(tr);
	event_trace_del_tracer(tr);
	ftrace_clear_pids(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove(tr->dir);
	free_percpu(tr->last_func_repeats);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

	return 0;
}

int trace_array_destroy(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret;

	if (!this_tr)
		return -EINVAL;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -ENODEV;

	/* Making sure trace array exists before destroying it. */
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			ret = __remove_instance(tr);
			break;
		}
	}

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_array_destroy);

static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	tr = trace_array_find(name);
	if (tr)
		ret = __remove_instance(tr);

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;
}
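
/*
 * instance_mkdir()/instance_rmdir() implement what user space sees as
 * (illustrative):
 *
 *   mkdir /sys/kernel/tracing/instances/foo
 *   rmdir /sys/kernel/tracing/instances/foo
 *
 * The rmdir fails with -EBUSY while the instance still holds extra
 * references or has active buffer readers (see __remove_instance()).
 */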

static __init void create_trace_instances(struct dentry *d_tracer)
{
	struct trace_array *tr;

	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
		return;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->name)
			continue;
		if (MEM_FAIL(trace_array_create_dir(tr) < 0,
			     "Failed to create instance directory\n"))
			break;
	}

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);
}

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	struct trace_event_file *file;
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	file = __find_event_file(tr, "ftrace", "print");
	if (file && file->dir)
		trace_create_file("trigger", 0644, file->dir, file,
				  &event_trigger_fops);
	tr->trace_marker_file = file;

	trace_create_file("trace_marker_raw", 0220, d_tracer,
			  tr, &tracing_mark_raw_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	trace_create_file("timestamp_mode", 0444, d_tracer, tr,
			  &trace_time_stamp_mode_fops);

	tr->buffer_percent = 50;

	trace_create_file("buffer_percent", 0444, d_tracer,
			  tr, &buffer_percent_fops);

	create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	trace_create_maxlat_file(tr, d_tracer);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		MEM_FAIL(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	trace_create_file("error_log", 0644, d_tracer,
			  tr, &tracing_err_log_fops);

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}

static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}
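
/*
 * The net effect (illustrative): the first access to "tracing" under
 * debugfs, e.g. "ls /sys/kernel/debug/tracing", triggers this automount,
 * roughly equivalent to:
 *
 *   mount -t tracefs nodev /sys/kernel/debug/tracing
 */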

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects the top level tracing directory to be set up on return.
 * Returns 0 on success, or a negative errno otherwise.
 */
int tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return 0;

	if (WARN_ON(!tracefs_initialized()))
		return -ENODEV;

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);

	return 0;
}

extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

static struct workqueue_struct *eval_map_wq __initdata;
static struct work_struct eval_map_work __initdata;

static void __init eval_map_work_func(struct work_struct *work)
{
	int len;

	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}

static int __init trace_eval_init(void)
{
	INIT_WORK(&eval_map_work, eval_map_work_func);

	eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
	if (!eval_map_wq) {
		pr_err("Unable to allocate eval_map_wq\n");
		/* Do work here */
		eval_map_work_func(&eval_map_work);
		return -ENOMEM;
	}

	queue_work(eval_map_wq, &eval_map_work);
	return 0;
}

static int __init trace_eval_sync(void)
{
	/* Make sure the eval map updates are finished */
	if (eval_map_wq)
		destroy_workqueue(eval_map_wq);
	return 0;
}

late_initcall_sync(trace_eval_sync);
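
/*
 * The eval (enum) maps can be large, so the insertion above is pushed to
 * an unbound workqueue rather than done synchronously, keeping it off the
 * boot critical path. trace_eval_sync() then flushes that work as a late
 * initcall, before the __init memory backing the maps is freed.
 */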

#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	mutex_lock(&trace_eval_mutex);

	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

static __init int tracer_init_tracefs(void)
{
	int ret;

	trace_access_lock_init();

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	event_trace_init();

	init_tracer_tracefs(&global_trace, NULL);
	ftrace_init_tracefs_toplevel(&global_trace, NULL);

	trace_create_file("tracing_thresh", 0644, NULL,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, NULL,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, NULL,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, NULL,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", 0444, NULL,
			  NULL, &tracing_saved_tgids_fops);

	trace_eval_init();

	trace_create_eval_file(NULL);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, NULL,
			  NULL, &tracing_dyn_info_fops);
#endif

	create_trace_instances(NULL);

	update_tracer_options(&global_trace);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
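
/*
 * Both notifiers above only act when ftrace_dump_on_oops is set, e.g.
 * via the "ftrace_dump_on_oops" kernel command line option or
 * (illustrative):
 *
 *   echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */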

/*
 * printk is limited to a max of 1024 bytes; we really don't need it
 * that big. Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero terminated, but we are paranoid */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
9614
Jason Wessel955b61e2010-08-05 09:22:23 -05009615void trace_init_global_iter(struct trace_iterator *iter)
9616{
9617 iter->tr = &global_trace;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009618 iter->trace = iter->tr->current_trace;
Steven Rostedtae3b5092013-01-23 15:22:59 -05009619 iter->cpu_file = RING_BUFFER_ALL_CPUS;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009620 iter->array_buffer = &global_trace.array_buffer;
Cody P Schaferb2f974d2013-10-23 11:49:57 -07009621
9622 if (iter->trace && iter->trace->open)
9623 iter->trace->open(iter);
9624
9625 /* Annotate start of buffers if we had overruns */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009626 if (ring_buffer_overruns(iter->array_buffer->buffer))
Cody P Schaferb2f974d2013-10-23 11:49:57 -07009627 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9628
9629 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9630 if (trace_clocks[iter->tr->clock_id].in_ns)
9631 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
Jason Wessel955b61e2010-08-05 09:22:23 -05009632}
9633
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009634void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009635{
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009636 /* use static because iter can be a bit big for the stack */
9637 static struct trace_iterator iter;
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009638 static atomic_t dump_running;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009639 struct trace_array *tr = &global_trace;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009640 unsigned int old_userobj;
Steven Rostedtd7690412008-10-01 00:29:53 -04009641 unsigned long flags;
9642 int cnt = 0, cpu;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009643
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009644 /* Only allow one dump user at a time. */
9645 if (atomic_inc_return(&dump_running) != 1) {
9646 atomic_dec(&dump_running);
9647 return;
Steven Rostedte0a413f2011-09-29 21:26:16 -04009648 }
9649
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009650 /*
9651 * Always turn off tracing when we dump.
9652 * We don't need to show trace output of what happens
9653 * between multiple crashes.
9654 *
9655 * If the user does a sysrq-z, then they can re-enable
9656 * tracing with echo 1 > tracing_on.
9657 */
9658 tracing_off();
9659
9660 local_irq_save(flags);
Petr Mladek03fc7f92018-06-27 16:20:28 +02009661 printk_nmi_direct_enter();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009662
Jovi Zhang38dbe0b2013-01-25 18:03:07 +08009663 /* Simulate the iterator */
Jason Wessel955b61e2010-08-05 09:22:23 -05009664 trace_init_global_iter(&iter);
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09009665	 /* Cannot use kmalloc for iter.temp and iter.fmt */
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04009666 iter.temp = static_temp_buf;
9667 iter.temp_size = STATIC_TEMP_BUF_SIZE;
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09009668 iter.fmt = static_fmt_buf;
9669 iter.fmt_size = STATIC_FMT_BUF_SIZE;
Jason Wessel955b61e2010-08-05 09:22:23 -05009670
Steven Rostedtd7690412008-10-01 00:29:53 -04009671 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009672 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -04009673 }
9674
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009675 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009676
Török Edwinb54d3de2008-11-22 13:28:48 +02009677 /* don't look at user memory in panic mode */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009678 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
Török Edwinb54d3de2008-11-22 13:28:48 +02009679
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009680 switch (oops_dump_mode) {
9681 case DUMP_ALL:
Steven Rostedtae3b5092013-01-23 15:22:59 -05009682 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009683 break;
9684 case DUMP_ORIG:
9685 iter.cpu_file = raw_smp_processor_id();
9686 break;
9687 case DUMP_NONE:
9688 goto out_enable;
9689 default:
9690 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
Steven Rostedtae3b5092013-01-23 15:22:59 -05009691 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009692 }
9693
9694 printk(KERN_TRACE "Dumping ftrace buffer:\n");
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009695
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009696 /* Did function tracer already get disabled? */
9697 if (ftrace_is_dead()) {
9698 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9699 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9700 }
9701
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009702 /*
Randy Dunlap5c8c2062020-08-06 20:32:59 -07009703	 * We need to stop all tracing on all CPUs to read
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009704	 * the next buffer. This is a bit expensive, but is
9705	 * not done often. We read everything we can,
9706	 * and then release the locks again.
9707 */
9708
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009709 while (!trace_empty(&iter)) {
9710
9711 if (!cnt)
9712 printk(KERN_TRACE "---------------------------------\n");
9713
9714 cnt++;
9715
Miguel Ojeda0c97bf82019-05-23 14:45:35 +02009716 trace_iterator_reset(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009717 iter.iter_flags |= TRACE_FILE_LAT_FMT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009718
Jason Wessel955b61e2010-08-05 09:22:23 -05009719 if (trace_find_next_entry_inc(&iter) != NULL) {
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08009720 int ret;
9721
9722 ret = print_trace_line(&iter);
9723 if (ret != TRACE_TYPE_NO_CONSUME)
9724 trace_consume(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009725 }
Steven Rostedtb892e5c2012-03-01 22:06:48 -05009726 touch_nmi_watchdog();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009727
9728 trace_printk_seq(&iter.seq);
9729 }
9730
9731 if (!cnt)
9732 printk(KERN_TRACE " (ftrace buffer empty)\n");
9733 else
9734 printk(KERN_TRACE "---------------------------------\n");
9735
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009736 out_enable:
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009737 tr->trace_flags |= old_userobj;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009738
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009739 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009740 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009741 }
Petr Mladek03fc7f92018-06-27 16:20:28 +02009742 atomic_dec(&dump_running);
9743 printk_nmi_direct_exit();
Steven Rostedtcd891ae2009-04-28 11:39:34 -04009744 local_irq_restore(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009745}
Paul E. McKenneya8eecf22011-10-02 11:01:15 -07009746EXPORT_SYMBOL_GPL(ftrace_dump);
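
/*
 * A minimal usage sketch (editorial addition): ftrace_dump() is exported
 * GPL above, so a module that detects an unrecoverable state can dump the
 * trace buffers on its own. The function below is hypothetical; DUMP_ORIG
 * and DUMP_ALL are the real enum ftrace_dump_mode values handled by the
 * switch in ftrace_dump().
 */
#if 0
static void example_fatal_path(void)
{
	pr_emerg("example: fatal state reached, dumping ftrace buffer\n");
	ftrace_dump(DUMP_ORIG);		/* DUMP_ALL would dump every CPU's buffer */
}
#endif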
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009747
Tom Zanussi7e465ba2017-09-22 14:58:20 -05009748#define WRITE_BUFSIZE 4096
9749
9750ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9751 size_t count, loff_t *ppos,
Masami Hiramatsud2622712021-02-01 13:48:11 -06009752 int (*createfn)(const char *))
Tom Zanussi7e465ba2017-09-22 14:58:20 -05009753{
9754 char *kbuf, *buf, *tmp;
9755 int ret = 0;
9756 size_t done = 0;
9757 size_t size;
9758
9759 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9760 if (!kbuf)
9761 return -ENOMEM;
9762
9763 while (done < count) {
9764 size = count - done;
9765
9766 if (size >= WRITE_BUFSIZE)
9767 size = WRITE_BUFSIZE - 1;
9768
9769 if (copy_from_user(kbuf, buffer + done, size)) {
9770 ret = -EFAULT;
9771 goto out;
9772 }
9773 kbuf[size] = '\0';
9774 buf = kbuf;
9775 do {
9776 tmp = strchr(buf, '\n');
9777 if (tmp) {
9778 *tmp = '\0';
9779 size = tmp - buf + 1;
9780 } else {
9781 size = strlen(buf);
9782 if (done + size < count) {
9783 if (buf != kbuf)
9784 break;
9785 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9786 pr_warn("Line length is too long: Should be less than %d\n",
9787 WRITE_BUFSIZE - 2);
9788 ret = -EINVAL;
9789 goto out;
9790 }
9791 }
9792 done += size;
9793
9794 /* Remove comments */
9795 tmp = strchr(buf, '#');
9796
9797 if (tmp)
9798 *tmp = '\0';
9799
Masami Hiramatsud2622712021-02-01 13:48:11 -06009800 ret = createfn(buf);
Tom Zanussi7e465ba2017-09-22 14:58:20 -05009801 if (ret)
9802 goto out;
9803 buf += size;
9804
9805 } while (done < count);
9806 }
9807 ret = done;
9808
9809out:
9810 kfree(kbuf);
9811
9812 return ret;
9813}
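
/*
 * A minimal sketch (editorial addition) of the calling convention for
 * trace_parse_run_command() above: the helper splits the user buffer into
 * '\n'-separated lines, strips '#' comments, and hands each complete line
 * to createfn, stopping at the first non-zero return. The "example_"
 * identifiers are hypothetical; the kprobe_events write handler follows
 * this same pattern.
 */
#if 0
static int example_create_cmd(const char *raw_command)
{
	/* Receives one complete command line, comments already removed. */
	pr_info("example: got command '%s'\n", raw_command);
	return 0;	/* non-zero aborts the remainder of the write */
}

static ssize_t example_write(struct file *file, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       example_create_cmd);
}
#endif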
9814
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009815__init static int tracer_alloc_buffers(void)
9816{
Steven Rostedt73c51622009-03-11 13:42:01 -04009817 int ring_buf_size;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309818 int ret = -ENOMEM;
9819
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009820
9821 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11009822 pr_warn("Tracing disabled due to lockdown\n");
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009823 return -EPERM;
9824 }
9825
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04009826 /*
Qiujun Huang499f7bb2020-10-10 22:09:24 +08009827 * Make sure we don't accidentally add more trace options
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04009828 * than we have bits for.
9829 */
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04009830 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04009831
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309832 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9833 goto out;
9834
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009835 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309836 goto out_free_buffer_mask;
9837
Steven Rostedt07d777f2011-09-22 14:01:55 -04009838 /* Only allocate trace_printk buffers if a trace_printk exists */
Nathan Chancellorbf2cbe02020-02-19 22:10:12 -07009839 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
Steven Rostedt81698832012-10-11 10:15:05 -04009840 /* Must be called before global_trace.buffer is allocated */
Steven Rostedt07d777f2011-09-22 14:01:55 -04009841 trace_printk_init_buffers();
9842
Steven Rostedt73c51622009-03-11 13:42:01 -04009843 /* To save memory, keep the ring buffer size to its minimum */
9844 if (ring_buffer_expanded)
9845 ring_buf_size = trace_buf_size;
9846 else
9847 ring_buf_size = 1;
9848
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309849 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009850 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009851
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009852 raw_spin_lock_init(&global_trace.start_lock);
9853
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009854 /*
9855	 * The prepare callback allocates some memory for the ring buffer. We
Qiujun Huang499f7bb2020-10-10 22:09:24 +08009856 * don't free the buffer if the CPU goes down. If we were to free
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009857 * the buffer, then the user would lose any trace that was in the
9858 * buffer. The memory will be removed once the "instance" is removed.
9859 */
9860 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9861					"trace/RB:prepare", trace_rb_cpu_prepare,
9862 NULL);
9863 if (ret < 0)
9864 goto out_free_cpumask;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009865 /* Used for event triggers */
Dan Carpenter147d88e02017-08-01 14:02:01 +03009866 ret = -ENOMEM;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009867 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9868 if (!temp_buffer)
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009869 goto out_rm_hp_state;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009870
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009871 if (trace_create_savedcmd() < 0)
9872 goto out_free_temp_buffer;
9873
Steven Rostedtab464282008-05-12 21:21:00 +02009874	/* TODO: make the number of buffers hot-pluggable with CPUs */
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009875 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05009876 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009877 goto out_free_savedcmd;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04009878 }
Steven Rostedta7603ff2012-08-06 16:24:11 -04009879
Steven Rostedt499e5472012-02-22 15:50:28 -05009880 if (global_trace.buffer_disabled)
9881 tracing_off();
Steven Rostedt3928a8a2008-09-29 23:02:41 -04009882
Steven Rostedte1e232c2014-02-10 23:38:46 -05009883 if (trace_boot_clock) {
9884 ret = tracing_set_clock(&global_trace, trace_boot_clock);
9885 if (ret < 0)
Joe Perchesa395d6a2016-03-22 14:28:09 -07009886 pr_warn("Trace clock %s not defined, going back to default\n",
9887 trace_boot_clock);
Steven Rostedte1e232c2014-02-10 23:38:46 -05009888 }
9889
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04009890 /*
9891 * register_tracer() might reference current_trace, so it
9892 * needs to be set before we register anything. This is
9893 * just a bootstrap of current_trace anyway.
9894 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009895 global_trace.current_trace = &nop_trace;
9896
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05009897 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9898
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05009899 ftrace_init_global_array_ops(&global_trace);
9900
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04009901 init_trace_flags_index(&global_trace);
9902
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04009903 register_tracer(&nop_trace);
9904
Steven Rostedt (VMware)dbeafd02017-03-03 13:48:42 -05009905 /* Function tracing may start here (via kernel command line) */
9906 init_function_trace();
9907
Steven Rostedt60a11772008-05-12 21:20:44 +02009908 /* All seems OK, enable tracing */
9909 tracing_disabled = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04009910
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009911 atomic_notifier_chain_register(&panic_notifier_list,
9912 &trace_panic_notifier);
9913
9914 register_die_notifier(&trace_die_notifier);
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01009915
Steven Rostedtae63b31e2012-05-03 23:09:03 -04009916 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9917
9918 INIT_LIST_HEAD(&global_trace.systems);
9919 INIT_LIST_HEAD(&global_trace.events);
Tom Zanussi067fe032018-01-15 20:51:56 -06009920 INIT_LIST_HEAD(&global_trace.hist_vars);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04009921 INIT_LIST_HEAD(&global_trace.err_log);
Steven Rostedtae63b31e2012-05-03 23:09:03 -04009922 list_add(&global_trace.list, &ftrace_trace_arrays);
9923
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08009924 apply_trace_boot_options();
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04009925
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04009926 register_snapshot_cmd();
9927
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05009928 test_can_verify();
9929
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01009930 return 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009931
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009932out_free_savedcmd:
9933 free_saved_cmdlines_buffer(savedcmd);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04009934out_free_temp_buffer:
9935 ring_buffer_free(temp_buffer);
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +01009936out_rm_hp_state:
9937 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309938out_free_cpumask:
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009939 free_cpumask_var(global_trace.tracing_cpumask);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309940out_free_buffer_mask:
9941 free_cpumask_var(tracing_buffer_mask);
9942out:
9943 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009944}
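
/*
 * For reference (editorial addition): several branches in
 * tracer_alloc_buffers() above are steered by boot parameters parsed
 * earlier in this file, e.g.:
 *
 *   ftrace=<tracer>        sets default_bootup_tracer (and expands the buffer)
 *   trace_buf_size=<n>     sets trace_buf_size, used once the buffer is expanded
 *   trace_clock=<clock>    sets trace_boot_clock
 *   ftrace_dump_on_oops    arms the panic/die notifiers registered here
 */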
Steven Rostedtb2821ae2009-02-02 21:38:32 -05009945
Steven Rostedt (VMware)e725c732017-03-03 13:37:33 -05009946void __init early_trace_init(void)
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05009947{
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05009948 if (tracepoint_printk) {
9949 tracepoint_print_iter =
Steven Rostedt (VMware)0e1e71d2021-04-19 14:23:12 -04009950 kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05009951 if (MEM_FAIL(!tracepoint_print_iter,
9952 "Failed to allocate trace iterator\n"))
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05009953 tracepoint_printk = 0;
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05009954 else
9955 static_key_enable(&tracepoint_printk_key.key);
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05009956 }
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05009957 tracer_alloc_buffers();
Steven Rostedt (VMware)e725c732017-03-03 13:37:33 -05009958}
9959
9960void __init trace_init(void)
9961{
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009962 trace_event_init();
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05009963}
9964
Steven Rostedtb2821ae2009-02-02 21:38:32 -05009965__init static int clear_boot_tracer(void)
9966{
9967 /*
9968	 * The default bootup tracer name is kept in an init-section buffer.
9969	 * This function is called at late_initcall time. If we did not
9970	 * find the boot tracer by then, clear the pointer out, to prevent
9971	 * later registration from accessing the buffer that is
9972	 * about to be freed.
9973 */
9974 if (!default_bootup_tracer)
9975 return 0;
9976
9977 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
9978 default_bootup_tracer);
9979 default_bootup_tracer = NULL;
9980
9981 return 0;
9982}
9983
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009984fs_initcall(tracer_init_tracefs);
Steven Rostedt (VMware)4bb0f0e2017-08-01 12:01:52 -04009985late_initcall_sync(clear_boot_tracer);
Chris Wilson3fd49c92018-03-30 16:01:31 +01009986
9987#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
9988__init static int tracing_set_default_clock(void)
9989{
9990 /* sched_clock_stable() is determined in late_initcall */
Chris Wilson5125eee2018-04-04 22:24:50 +01009991 if (!trace_boot_clock && !sched_clock_stable()) {
Masami Ichikawabf24daa2020-01-16 22:12:36 +09009992 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9993 pr_warn("Can not set tracing clock due to lockdown\n");
9994 return -EPERM;
9995 }
9996
Chris Wilson3fd49c92018-03-30 16:01:31 +01009997 printk(KERN_WARNING
9998 "Unstable clock detected, switching default tracing clock to \"global\"\n"
9999 "If you want to keep using the local clock, then add:\n"
10000 " \"trace_clock=local\"\n"
10001 "on the kernel command line\n");
10002 tracing_set_clock(&global_trace, "global");
10003 }
10004
10005 return 0;
10006}
10007late_initcall_sync(tracing_set_default_clock);
10008#endif