// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk(), could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If boot-time tracing including tracers/events via kernel cmdline
 * is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
void __init disable_tracing_selftest(const char *reason)
{
	if (!tracing_selftest_disabled) {
		tracing_selftest_disabled = true;
		pr_info("Ftrace startup test is disabled due to %s\n", reason);
	}
}
#endif

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static bool tracepoint_printk_stop_on_boot __initdata;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set it to 1 if you want to dump the buffers of all CPUs
 * Set it to 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

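/*
 * Illustrative usage note (added comment, not from the original source):
 * based on the command-line parsing in set_ftrace_dump_on_oops() below,
 * the dump mode can be selected at boot roughly as follows:
 *
 *	ftrace_dump_on_oops            dump every CPU's buffer (DUMP_ALL)
 *	ftrace_dump_on_oops=1          same as above
 *	ftrace_dump_on_oops=orig_cpu   dump only the oopsing CPU (DUMP_ORIG)
 *	ftrace_dump_on_oops=2          same as "orig_cpu"
 *
 * or at run time via /proc/sys/kernel/ftrace_dump_on_oops.
 */
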
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
				   unsigned int trace_ctx);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str || !strcmp("1", str)) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	/* Ignore the "tp_printk_stop_on_boot" param */
	if (*str == '_')
		return 0;

	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

static int __init set_tracepoint_printk_stop(char *str)
{
	tracepoint_printk_stop_on_boot = true;
	return 1;
}
__setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);

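/*
 * Illustrative summary (added comment, not from the original source): the
 * __setup() handlers above wire up the boot-time parameters for tracing.
 * The example values below are assumptions for illustration only:
 *
 *	ftrace=function              select a tracer at boot
 *	trace_options=stacktrace     set default trace options
 *	trace_clock=global           select the trace clock
 *	alloc_snapshot               allocate the snapshot buffer at boot
 *	traceoff_on_warning          stop tracing when a WARN*() is hit
 *	tp_printk                    pipe tracepoints to printk
 *	tp_printk_stop_on_boot       stop doing so once boot has finished
 *	ftrace_dump_on_oops[=...]    dump the buffers on an oops (see above)
 */
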
unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

static void
trace_process_export(struct trace_export *export,
		     struct ring_buffer_event *event, int flag)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	if (export->flags & flag) {
		entry = ring_buffer_event_data(event);
		size = ring_buffer_event_length(event);
		export->write(export, entry, size);
	}
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);

static inline void ftrace_exports_enable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_inc(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_inc(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_inc(&trace_marker_exports_enabled);
}

static inline void ftrace_exports_disable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_dec(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_dec(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_dec(&trace_marker_exports_enabled);
}

static void ftrace_exports(struct ring_buffer_event *event, int flag)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_check(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event, flag);
		export = rcu_dereference_raw_check(export->next);
	}

	preempt_enable_notrace();
}

static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are entering export into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included into the list.
	 */
	rcu_assign_pointer(*list, export);
}

static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	ftrace_exports_enable(export);

	add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	ftrace_exports_disable(export);

	return ret;
}

int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);

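/*
 * Minimal usage sketch (added comment, not part of the original file): a
 * module can mirror trace data to its own sink by registering a
 * struct trace_export with a ->write() callback and the export flags it
 * cares about. The callback and names below are hypothetical:
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int size)
 *	{
 *		my_sink_write(entry, size);	// forward the raw trace entry
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *		.flags	= TRACE_EXPORT_FUNCTION | TRACE_EXPORT_EVENT,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */
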
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |			\
	 TRACE_ITER_HASH_PTR)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

/**
 * trace_array_put - Decrement the reference counter for this trace array.
 * @this_tr : pointer to the trace array
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 *
 */
void trace_array_put(struct trace_array *this_tr)
{
	if (!this_tr)
		return;

	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);

int tracing_check_open_get_tr(struct trace_array *tr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	return trace_pid_list_is_set(filtered_pids, search_pid);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids not to be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
		       struct trace_pid_list *filtered_no_pids,
		       struct task_struct *task)
{
	/*
	 * If filtered_no_pids is not empty, and the task's pid is listed
	 * in filtered_no_pids, then return true.
	 * Otherwise, if filtered_pids is empty, that means we can
	 * trace all tasks. If it has content, then only trace pids
	 * within filtered_pids.
	 */

	return (filtered_pids &&
		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
		(filtered_no_pids &&
		 trace_find_filtered_pid(filtered_no_pids, task->pid));
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* "self" is set for forks, and NULL for exits */
	if (self)
		trace_pid_list_set(pid_list, task->pid);
	else
		trace_pid_list_clear(pid_list, task->pid);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	long pid = (unsigned long)v;
	unsigned int next;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	if (trace_pid_list_next(pid_list, pid, &next) < 0)
		return NULL;

	pid = next;

	/* Return pid + 1 to allow zero to be represented */
	return (void *)(pid + 1);
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	unsigned int first;
	loff_t l = 0;

	if (trace_pid_list_first(pid_list, &first) < 0)
		return NULL;

	pid = first;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}

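/*
 * Illustrative sketch (added comment, not from the original source): the
 * three helpers above are meant to back a seq_file interface over a
 * trace_pid_list. A caller elsewhere in the tracing code would wire them
 * up roughly like this (the wrapper names and the locking done in the
 * hypothetical ->stop() are assumptions):
 *
 *	static void *my_pid_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(my_pid_list, pos);
 *	}
 *
 *	static void *my_pid_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(my_pid_list, v, pos);
 *	}
 *
 *	static const struct seq_operations my_pid_seq_ops = {
 *		.start	= my_pid_start,
 *		.next	= my_pid_next,
 *		.stop	= my_pid_stop,		// release what start() took
 *		.show	= trace_pid_show,
 *	};
 */
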
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = trace_pid_list_alloc();
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		ret = trace_pid_list_first(filtered_pids, &pid);
		while (!ret) {
			trace_pid_list_set(pid_list, pid);
			ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
			nr_pids++;
		}
	}

	ret = 0;
	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;

		pid = (pid_t)val;

		if (trace_pid_list_set(pid_list, pid) < 0) {
			ret = -1;
			break;
		}
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_pid_list_free(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_pid_list_free(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}

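/*
 * Usage note (added comment, not from the original source): this helper
 * backs the tracefs pid-filter files (for example "set_ftrace_pid" and
 * "set_event_pid"), and since the write is all or nothing the list is
 * replaced wholesale from userspace, e.g.:
 *
 *	echo 123 456 > /sys/kernel/tracing/set_event_pid
 *	echo > /sys/kernel/tracing/set_event_pid	# clear the list
 */
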
static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned int trace_ctx,
				 int skip, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned int trace_ctx,
				      int skip, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
					unsigned int trace_ctx,
					int skip, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned int trace_ctx,
				      int skip, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned int trace_ctx)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, trace_ctx);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned int trace_ctx)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, trace_ctx);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_on(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		/* ring_buffer_unlock_commit() enables preemption */
		preempt_enable_notrace();
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip: The address of the caller
 * @str: The constant string to write
 * @size: The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned int trace_ctx;
	int alloc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	trace_ctx = tracing_gen_ctx();
	buffer = global_trace.array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    trace_ctx);
	if (!event) {
		size = 0;
		goto out;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
 out:
	ring_buffer_nest_end(buffer);
	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip: The address of the caller
 * @str: The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct bputs_entry *entry;
	unsigned int trace_ctx;
	int size = sizeof(struct bputs_entry);
	int ret = 0;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	trace_ctx = tracing_gen_ctx();
	buffer = global_trace.array_buffer.buffer;

	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    trace_ctx);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);

	ret = 1;
 out:
	ring_buffer_nest_end(buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
					   void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here! ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

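/*
 * Illustrative sketch (added comment, not from the original source): kernel
 * code that wants a snapshot at an interesting point would typically do
 * something like the following, per the kernel-doc above (the condition is
 * hypothetical):
 *
 *	tracing_snapshot_alloc();	// once, where sleeping is allowed
 *	...
 *	if (interesting_condition)
 *		tracing_snapshot();	// swap the live buffer with the snapshot
 */
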
/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr: The tracing instance to snapshot
 * @cond_data: The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr: The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);

static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

1252/**
Chunyu Hu5a93bae2017-10-19 14:32:33 +08001253 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001254 *
Chunyu Hu5a93bae2017-10-19 14:32:33 +08001255 * This is similar to tracing_snapshot(), but it will allocate the
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001256 * snapshot buffer if it isn't already allocated. Use this only
1257 * where it is safe to sleep, as the allocation may sleep.
1258 *
1259 * This causes a swap between the snapshot buffer and the current live
1260 * tracing buffer. You can use this to take snapshots of the live
1261 * trace when some condition is triggered, but continue to trace.
1262 */
1263void tracing_snapshot_alloc(void)
1264{
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001265 int ret;
1266
Tom Zanussi93e31ff2013-10-24 08:59:26 -05001267 ret = tracing_alloc_snapshot();
1268 if (ret < 0)
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04001269 return;
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001270
1271 tracing_snapshot();
1272}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001273EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
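/*
 * Illustrative usage (not part of this file; the "my_*" names below are
 * made up): a module that needs to snapshot the live trace from a
 * context that cannot sleep can pre-allocate the spare buffer with
 * tracing_alloc_snapshot() from process context, then call
 * tracing_snapshot() later from any context.  tracing_snapshot_alloc()
 * combines both steps but may sleep, so it is only suitable where
 * sleeping is allowed.
 *
 *	static int __init my_module_init(void)
 *	{
 *		return tracing_alloc_snapshot();
 *	}
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		if (my_error_condition(data))
 *			tracing_snapshot();
 *		return IRQ_HANDLED;
 *	}
 */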
Tom Zanussia35873a2019-02-13 17:42:45 -06001274
1275/**
1276 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1277 * @tr: The tracing instance
1278 * @cond_data: User data to associate with the snapshot
1279 * @update: Implementation of the cond_snapshot update function
1280 *
1281 * Check whether the conditional snapshot for the given instance has
1282 * already been enabled, or if the current tracer is already using a
1283 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1284 * save the cond_data and update function inside.
1285 *
1286 * Returns 0 if successful, error otherwise.
1287 */
1288int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1289 cond_update_fn_t update)
1290{
1291 struct cond_snapshot *cond_snapshot;
1292 int ret = 0;
1293
1294 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1295 if (!cond_snapshot)
1296 return -ENOMEM;
1297
1298 cond_snapshot->cond_data = cond_data;
1299 cond_snapshot->update = update;
1300
1301 mutex_lock(&trace_types_lock);
1302
1303 ret = tracing_alloc_snapshot_instance(tr);
1304 if (ret)
1305 goto fail_unlock;
1306
1307 if (tr->current_trace->use_max_tr) {
1308 ret = -EBUSY;
1309 goto fail_unlock;
1310 }
1311
Steven Rostedt (VMware)1c347a92019-02-14 18:45:21 -05001312 /*
1313 * The cond_snapshot can only change to NULL without the
1314 * trace_types_lock. We don't care if we race with it going
1315 * to NULL, but we want to make sure that it's not set to
1316 * something other than NULL when we get here, which we can
1317 * do safely with only holding the trace_types_lock and not
1318 * having to take the max_lock.
1319 */
Tom Zanussia35873a2019-02-13 17:42:45 -06001320 if (tr->cond_snapshot) {
1321 ret = -EBUSY;
1322 goto fail_unlock;
1323 }
1324
1325 arch_spin_lock(&tr->max_lock);
1326 tr->cond_snapshot = cond_snapshot;
1327 arch_spin_unlock(&tr->max_lock);
1328
1329 mutex_unlock(&trace_types_lock);
1330
1331 return ret;
1332
1333 fail_unlock:
1334 mutex_unlock(&trace_types_lock);
1335 kfree(cond_snapshot);
1336 return ret;
1337}
1338EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
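/*
 * Illustrative usage of the conditional snapshot API (not part of this
 * file; the "my_*" names are made up).  The update callback runs under
 * tr->max_lock and decides whether a requested snapshot should really
 * be taken; the cond_data pointer passed to
 * tracing_snapshot_cond_enable() is handed back to the callback and can
 * also be read later with tracing_cond_snapshot_data().
 *
 *	struct my_cond {
 *		u64 threshold;
 *	};
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_cond *cond = cond_data;
 *
 *		return my_measured_value() > cond->threshold;
 *	}
 *
 *	From process context:
 *		tracing_snapshot_cond_enable(tr, &my_cond_instance, my_update);
 *	From the point of interest (any context):
 *		tracing_snapshot_cond(tr, &my_cond_instance);
 *	When done:
 *		tracing_snapshot_cond_disable(tr);
 */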
1339
1340/**
1341 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1342 * @tr: The tracing instance
1343 *
1344 * Check whether the conditional snapshot for the given instance is
1345 * enabled; if so, free the cond_snapshot associated with it,
1346 * otherwise return -EINVAL.
1347 *
1348 * Returns 0 if successful, error otherwise.
1349 */
1350int tracing_snapshot_cond_disable(struct trace_array *tr)
1351{
1352 int ret = 0;
1353
1354 arch_spin_lock(&tr->max_lock);
1355
1356 if (!tr->cond_snapshot)
1357 ret = -EINVAL;
1358 else {
1359 kfree(tr->cond_snapshot);
1360 tr->cond_snapshot = NULL;
1361 }
1362
1363 arch_spin_unlock(&tr->max_lock);
1364
1365 return ret;
1366}
1367EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001368#else
1369void tracing_snapshot(void)
1370{
1371 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1372}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001373EXPORT_SYMBOL_GPL(tracing_snapshot);
Tom Zanussia35873a2019-02-13 17:42:45 -06001374void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1375{
1376 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1377}
1378EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
Tom Zanussi93e31ff2013-10-24 08:59:26 -05001379int tracing_alloc_snapshot(void)
1380{
1381 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1382 return -ENODEV;
1383}
1384EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001385void tracing_snapshot_alloc(void)
1386{
1387 /* Give warning */
1388 tracing_snapshot();
1389}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001390EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Tom Zanussia35873a2019-02-13 17:42:45 -06001391void *tracing_cond_snapshot_data(struct trace_array *tr)
1392{
1393 return NULL;
1394}
1395EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1396int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1397{
1398 return -ENODEV;
1399}
1400EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1401int tracing_snapshot_cond_disable(struct trace_array *tr)
1402{
1403 return false;
1404}
1405EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001406#endif /* CONFIG_TRACER_SNAPSHOT */
1407
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -04001408void tracer_tracing_off(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001409{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001410 if (tr->array_buffer.buffer)
1411 ring_buffer_record_off(tr->array_buffer.buffer);
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001412 /*
1413 * This flag is looked at when buffers haven't been allocated
1414	 * yet, or by some tracers (like irqsoff) that just want to
1415	 * know if the ring buffer has been disabled, but can handle
1416	 * races where it gets disabled while we still do a record.
1417 * As the check is in the fast path of the tracers, it is more
1418 * important to be fast than accurate.
1419 */
1420 tr->buffer_disabled = 1;
1421 /* Make the flag seen by readers */
1422 smp_wmb();
1423}
1424
Steven Rostedt499e5472012-02-22 15:50:28 -05001425/**
1426 * tracing_off - turn off tracing buffers
1427 *
1428 * This function stops the tracing buffers from recording data.
1429 * It does not disable any overhead the tracers themselves may
1430 * be causing. This function simply causes all recording to
1431 * the ring buffers to fail.
1432 */
1433void tracing_off(void)
1434{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001435 tracer_tracing_off(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001436}
1437EXPORT_SYMBOL_GPL(tracing_off);
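/*
 * Illustrative usage (not part of this file): tracing_off() is commonly
 * called from debugging code to freeze the ring buffer the moment a
 * problem is detected, so the events leading up to it stay available in
 * the "trace" file.  "MY_BAD_STATE" below is a placeholder.
 *
 *	if (WARN_ON(state == MY_BAD_STATE))
 *		tracing_off();
 */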
1438
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001439void disable_trace_on_warning(void)
1440{
Steven Rostedt (VMware)c2007842020-05-29 10:46:32 -04001441 if (__disable_trace_on_warning) {
1442 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1443 "Disabling tracing due to warning\n");
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001444 tracing_off();
Steven Rostedt (VMware)c2007842020-05-29 10:46:32 -04001445 }
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001446}
1447
Steven Rostedt499e5472012-02-22 15:50:28 -05001448/**
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001449 * tracer_tracing_is_on - show real state of ring buffer enabled
1450	 * @tr: the trace array to know if ring buffer is enabled
1451 *
1452 * Shows real state of the ring buffer if it is enabled or not.
1453 */
Steven Rostedt (VMware)ec573502018-08-01 16:08:57 -04001454bool tracer_tracing_is_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001455{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001456 if (tr->array_buffer.buffer)
1457 return ring_buffer_record_is_on(tr->array_buffer.buffer);
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001458 return !tr->buffer_disabled;
1459}
1460
Steven Rostedt499e5472012-02-22 15:50:28 -05001461/**
1462 * tracing_is_on - show state of ring buffers enabled
1463 */
1464int tracing_is_on(void)
1465{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001466 return tracer_tracing_is_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001467}
1468EXPORT_SYMBOL_GPL(tracing_is_on);
1469
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001470static int __init set_buf_size(char *str)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001471{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001472 unsigned long buf_size;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001473
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001474 if (!str)
1475 return 0;
Li Zefan9d612be2009-06-24 17:33:15 +08001476 buf_size = memparse(str, &str);
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001477 /* nr_entries can not be zero */
Li Zefan9d612be2009-06-24 17:33:15 +08001478 if (buf_size == 0)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001479 return 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001480 trace_buf_size = buf_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001481 return 1;
1482}
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001483__setup("trace_buf_size=", set_buf_size);
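/*
 * The size string is parsed with memparse() above, so the usual K/M/G
 * suffixes are accepted and the value is the per-CPU ring buffer size
 * in bytes.  Example boot command lines:
 *
 *	trace_buf_size=8M
 *	trace_buf_size=1441792
 */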
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001484
Tim Bird0e950172010-02-25 15:36:43 -08001485static int __init set_tracing_thresh(char *str)
1486{
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001487 unsigned long threshold;
Tim Bird0e950172010-02-25 15:36:43 -08001488 int ret;
1489
1490 if (!str)
1491 return 0;
Daniel Walterbcd83ea2012-09-26 22:08:38 +02001492 ret = kstrtoul(str, 0, &threshold);
Tim Bird0e950172010-02-25 15:36:43 -08001493 if (ret < 0)
1494 return 0;
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001495 tracing_thresh = threshold * 1000;
Tim Bird0e950172010-02-25 15:36:43 -08001496 return 1;
1497}
1498__setup("tracing_thresh=", set_tracing_thresh);
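/*
 * The threshold is given in microseconds on the command line and
 * converted to nanoseconds above.  Example boot command line, recording
 * only latencies above 100 usecs for tracers that honor tracing_thresh
 * (e.g. the irqsoff and wakeup latency tracers):
 *
 *	tracing_thresh=100
 */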
1499
Steven Rostedt57f50be2008-05-12 21:20:44 +02001500unsigned long nsecs_to_usecs(unsigned long nsecs)
1501{
1502 return nsecs / 1000;
1503}
1504
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001505/*
1506 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001507 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001508 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001509 * of strings in the order that the evals (enum) were defined.
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001510 */
1511#undef C
1512#define C(a, b) b
1513
Ingo Molnarf2cc0202021-03-23 18:49:35 +01001514/* These must match the bit positions in trace_iterator_flags */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001515static const char *trace_options[] = {
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001516 TRACE_FLAGS
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001517 NULL
1518};
1519
Zhaolei5079f322009-08-25 16:12:56 +08001520static struct {
1521 u64 (*func)(void);
1522 const char *name;
David Sharp8be07092012-11-13 12:18:22 -08001523 int in_ns; /* is this clock in nanoseconds? */
Zhaolei5079f322009-08-25 16:12:56 +08001524} trace_clocks[] = {
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001525 { trace_clock_local, "local", 1 },
1526 { trace_clock_global, "global", 1 },
1527 { trace_clock_counter, "counter", 0 },
Linus Torvaldse7fda6c2014-08-05 17:46:42 -07001528 { trace_clock_jiffies, "uptime", 0 },
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001529 { trace_clock, "perf", 1 },
1530 { ktime_get_mono_fast_ns, "mono", 1 },
Drew Richardsonaabfa5f2015-05-08 07:30:39 -07001531 { ktime_get_raw_fast_ns, "mono_raw", 1 },
Thomas Gleixnera3ed0e432018-04-25 15:33:38 +02001532 { ktime_get_boot_fast_ns, "boot", 1 },
David Sharp8cbd9cc2012-11-13 12:18:21 -08001533 ARCH_TRACE_CLOCKS
Zhaolei5079f322009-08-25 16:12:56 +08001534};
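/*
 * The clock used by a trace instance can be changed at runtime by
 * writing one of the names above to its trace_clock file, e.g. (path
 * assumes tracefs is mounted at /sys/kernel/tracing):
 *
 *	echo mono > /sys/kernel/tracing/trace_clock
 *
 * Clocks with in_ns == 0 (such as "counter") do not count nanoseconds;
 * trace_clock_in_ns() below lets output code check for that.
 */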
1535
Tom Zanussi860f9f62018-01-15 20:51:48 -06001536bool trace_clock_in_ns(struct trace_array *tr)
1537{
1538 if (trace_clocks[tr->clock_id].in_ns)
1539 return true;
1540
1541 return false;
1542}
1543
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001544/*
1545 * trace_parser_get_init - gets the buffer for trace parser
1546 */
1547int trace_parser_get_init(struct trace_parser *parser, int size)
1548{
1549 memset(parser, 0, sizeof(*parser));
1550
1551 parser->buffer = kmalloc(size, GFP_KERNEL);
1552 if (!parser->buffer)
1553 return 1;
1554
1555 parser->size = size;
1556 return 0;
1557}
1558
1559/*
1560 * trace_parser_put - frees the buffer for trace parser
1561 */
1562void trace_parser_put(struct trace_parser *parser)
1563{
1564 kfree(parser->buffer);
Steven Rostedt (VMware)0e684b62017-02-02 17:58:18 -05001565 parser->buffer = NULL;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001566}
1567
1568/*
1569 * trace_get_user - reads the user input string separated by space
1570 * (matched by isspace(ch))
1571 *
1572 * For each string found the 'struct trace_parser' is updated,
1573 * and the function returns.
1574 *
1575 * Returns number of bytes read.
1576 *
1577 * See kernel/trace/trace.h for 'struct trace_parser' details.
1578 */
1579int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1580 size_t cnt, loff_t *ppos)
1581{
1582 char ch;
1583 size_t read = 0;
1584 ssize_t ret;
1585
1586 if (!*ppos)
1587 trace_parser_clear(parser);
1588
1589 ret = get_user(ch, ubuf++);
1590 if (ret)
1591 goto out;
1592
1593 read++;
1594 cnt--;
1595
1596 /*
1597 * The parser is not finished with the last write,
1598 * continue reading the user input without skipping spaces.
1599 */
1600 if (!parser->cont) {
1601 /* skip white space */
1602 while (cnt && isspace(ch)) {
1603 ret = get_user(ch, ubuf++);
1604 if (ret)
1605 goto out;
1606 read++;
1607 cnt--;
1608 }
1609
Changbin Du76638d92018-01-16 17:02:29 +08001610 parser->idx = 0;
1611
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001612 /* only spaces were written */
Changbin Du921a7ac2018-01-16 17:02:28 +08001613 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001614 *ppos += read;
1615 ret = read;
1616 goto out;
1617 }
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001618 }
1619
1620 /* read the non-space input */
Changbin Du921a7ac2018-01-16 17:02:28 +08001621 while (cnt && !isspace(ch) && ch) {
Li Zefan3c235a32009-09-22 13:51:54 +08001622 if (parser->idx < parser->size - 1)
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001623 parser->buffer[parser->idx++] = ch;
1624 else {
1625 ret = -EINVAL;
1626 goto out;
1627 }
1628 ret = get_user(ch, ubuf++);
1629 if (ret)
1630 goto out;
1631 read++;
1632 cnt--;
1633 }
1634
1635	 /* We either got the complete input or we have to wait for another call. */
Changbin Du921a7ac2018-01-16 17:02:28 +08001636 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001637 parser->buffer[parser->idx] = 0;
1638 parser->cont = false;
Steven Rostedt057db842013-10-09 22:23:23 -04001639 } else if (parser->idx < parser->size - 1) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001640 parser->cont = true;
1641 parser->buffer[parser->idx++] = ch;
Changbin Duf4d07062018-01-16 17:02:30 +08001642 /* Make sure the parsed string always terminates with '\0'. */
1643 parser->buffer[parser->idx] = 0;
Steven Rostedt057db842013-10-09 22:23:23 -04001644 } else {
1645 ret = -EINVAL;
1646 goto out;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001647 }
1648
1649 *ppos += read;
1650 ret = read;
1651
1652out:
1653 return ret;
1654}
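/*
 * Illustrative sketch of how a tracefs write handler typically drives
 * the parser above (not part of this file; "my_apply_token" is a
 * made-up callback).  Each trace_get_user() call consumes one
 * whitespace-separated token, and parser.cont carries a token that is
 * split across write() calls over to the next call.
 *
 *	static ssize_t my_write(struct file *file, const char __user *ubuf,
 *				size_t cnt, loff_t *ppos)
 *	{
 *		struct trace_parser parser;
 *		ssize_t read;
 *
 *		if (trace_parser_get_init(&parser, 128))
 *			return -ENOMEM;
 *
 *		read = trace_get_user(&parser, ubuf, cnt, ppos);
 *		if (read > 0 && trace_parser_loaded(&parser))
 *			my_apply_token(parser.buffer);
 *
 *		trace_parser_put(&parser);
 *		return read;
 *	}
 */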
1655
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001656/* TODO add a seq_buf_to_buffer() */
Dmitri Vorobievb8b94262009-03-22 19:11:11 +02001657static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001658{
1659 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001660
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001661 if (trace_seq_used(s) <= s->seq.readpos)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001662 return -EBUSY;
1663
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001664 len = trace_seq_used(s) - s->seq.readpos;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001665 if (cnt > len)
1666 cnt = len;
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001667 memcpy(buf, s->buffer + s->seq.readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001668
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001669 s->seq.readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001670 return cnt;
1671}
1672
Tim Bird0e950172010-02-25 15:36:43 -08001673unsigned long __read_mostly tracing_thresh;
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001674static const struct file_operations tracing_max_lat_fops;
1675
Steven Rostedt (VMware)6880c982021-06-25 19:47:33 -04001676#ifdef LATENCY_FS_NOTIFY
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001677
1678static struct workqueue_struct *fsnotify_wq;
1679
1680static void latency_fsnotify_workfn(struct work_struct *work)
1681{
1682 struct trace_array *tr = container_of(work, struct trace_array,
1683 fsnotify_work);
Amir Goldstein82ace1e2020-07-22 15:58:44 +03001684 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001685}
1686
1687static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1688{
1689 struct trace_array *tr = container_of(iwork, struct trace_array,
1690 fsnotify_irqwork);
1691 queue_work(fsnotify_wq, &tr->fsnotify_work);
1692}
1693
1694static void trace_create_maxlat_file(struct trace_array *tr,
1695 struct dentry *d_tracer)
1696{
1697 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1698 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04001699 tr->d_max_latency = trace_create_file("tracing_max_latency",
1700 TRACE_MODE_WRITE,
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001701 d_tracer, &tr->max_latency,
1702 &tracing_max_lat_fops);
1703}
1704
1705__init static int latency_fsnotify_init(void)
1706{
1707 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1708 WQ_UNBOUND | WQ_HIGHPRI, 0);
1709 if (!fsnotify_wq) {
1710 pr_err("Unable to allocate tr_max_lat_wq\n");
1711 return -ENOMEM;
1712 }
1713 return 0;
1714}
1715
1716late_initcall_sync(latency_fsnotify_init);
1717
1718void latency_fsnotify(struct trace_array *tr)
1719{
1720 if (!fsnotify_wq)
1721 return;
1722 /*
1723 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1724 * possible that we are called from __schedule() or do_idle(), which
1725 * could cause a deadlock.
1726 */
1727 irq_work_queue(&tr->fsnotify_irqwork);
1728}
1729
Jackie Liu424b6502021-09-22 10:51:22 +08001730#elif defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
1731 || defined(CONFIG_OSNOISE_TRACER)
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001732
1733#define trace_create_maxlat_file(tr, d_tracer) \
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04001734 trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
1735 d_tracer, &tr->max_latency, &tracing_max_lat_fops)
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001736
Jackie Liu424b6502021-09-22 10:51:22 +08001737#else
1738#define trace_create_maxlat_file(tr, d_tracer) do { } while (0)
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001739#endif
Tim Bird0e950172010-02-25 15:36:43 -08001740
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001741#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001742/*
1743 * Copy the new maximum trace into the separate maximum-trace
1744 * structure. (this way the maximum trace is permanently saved,
Chunyu Hu5a93bae2017-10-19 14:32:33 +08001745 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001746 */
1747static void
1748__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1749{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001750 struct array_buffer *trace_buf = &tr->array_buffer;
1751 struct array_buffer *max_buf = &tr->max_buffer;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001752 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1753 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001754
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001755 max_buf->cpu = cpu;
1756 max_buf->time_start = data->preempt_timestamp;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001757
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05001758 max_data->saved_latency = tr->max_latency;
Steven Rostedt8248ac02009-09-02 12:27:41 -04001759 max_data->critical_start = data->critical_start;
1760 max_data->critical_end = data->critical_end;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001761
Tom Zanussi85f726a2019-03-05 10:12:00 -06001762 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
Steven Rostedt8248ac02009-09-02 12:27:41 -04001763 max_data->pid = tsk->pid;
Steven Rostedt (Red Hat)f17a5192013-05-30 21:10:37 -04001764 /*
1765 * If tsk == current, then use current_uid(), as that does not use
1766 * RCU. The irq tracer can be called out of RCU scope.
1767 */
1768 if (tsk == current)
1769 max_data->uid = current_uid();
1770 else
1771 max_data->uid = task_uid(tsk);
1772
Steven Rostedt8248ac02009-09-02 12:27:41 -04001773 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1774 max_data->policy = tsk->policy;
1775 max_data->rt_priority = tsk->rt_priority;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001776
1777 /* record this tasks comm */
1778 tracing_record_cmdline(tsk);
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001779 latency_fsnotify(tr);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001780}
1781
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001782/**
1783 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1784 * @tr: tracer
1785 * @tsk: the task with the latency
1786 * @cpu: The cpu that initiated the trace.
Tom Zanussia35873a2019-02-13 17:42:45 -06001787 * @cond_data: User data associated with a conditional snapshot
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001788 *
1789 * Flip the buffers between the @tr and the max_tr and record information
1790 * about which task was the cause of this latency.
1791 */
Ingo Molnare309b412008-05-12 21:20:51 +02001792void
Tom Zanussia35873a2019-02-13 17:42:45 -06001793update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1794 void *cond_data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001795{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001796 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001797 return;
1798
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001799 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt34600f02013-01-22 13:35:11 -05001800
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05001801 if (!tr->allocated_snapshot) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001802 /* Only the nop tracer should hit this when disabling */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001803 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001804 return;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001805 }
Steven Rostedt34600f02013-01-22 13:35:11 -05001806
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001807 arch_spin_lock(&tr->max_lock);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001808
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001809 /* Inherit the recordable setting from array_buffer */
1810 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
Masami Hiramatsu73c8d892018-07-14 01:28:15 +09001811 ring_buffer_record_on(tr->max_buffer.buffer);
1812 else
1813 ring_buffer_record_off(tr->max_buffer.buffer);
1814
Tom Zanussia35873a2019-02-13 17:42:45 -06001815#ifdef CONFIG_TRACER_SNAPSHOT
1816 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1817 goto out_unlock;
1818#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001819 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001820
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001821 __update_max_tr(tr, tsk, cpu);
Tom Zanussia35873a2019-02-13 17:42:45 -06001822
1823 out_unlock:
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001824 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001825}
1826
1827/**
1828 * update_max_tr_single - only copy one trace over, and reset the rest
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07001829 * @tr: tracer
1830 * @tsk: task with the latency
1831 * @cpu: the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001832 *
1833 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001834 */
Ingo Molnare309b412008-05-12 21:20:51 +02001835void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001836update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1837{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001838 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001839
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001840 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001841 return;
1842
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001843 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001844 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001845 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001846 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001847 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001848 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001849
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001850 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001851
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001852 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001853
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001854 if (ret == -EBUSY) {
1855 /*
1856 * We failed to swap the buffer due to a commit taking
1857 * place on this CPU. We fail to record, but we reset
1858 * the max trace buffer (no one writes directly to it)
1859 * and flag that it failed.
1860 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001861 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001862 "Failed to swap buffers due to commit in progress\n");
1863 }
1864
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001865 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001866
1867 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001868 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001869}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001870#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001871
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05001872static int wait_on_pipe(struct trace_iterator *iter, int full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001873{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001874 /* Iterators are static, they should be filled or empty */
1875 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001876 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001877
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001878 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
Rabin Vincente30f53a2014-11-10 19:46:34 +01001879 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001880}
1881
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001882#ifdef CONFIG_FTRACE_STARTUP_TEST
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001883static bool selftests_can_run;
1884
1885struct trace_selftests {
1886 struct list_head list;
1887 struct tracer *type;
1888};
1889
1890static LIST_HEAD(postponed_selftests);
1891
1892static int save_selftest(struct tracer *type)
1893{
1894 struct trace_selftests *selftest;
1895
1896 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1897 if (!selftest)
1898 return -ENOMEM;
1899
1900 selftest->type = type;
1901 list_add(&selftest->list, &postponed_selftests);
1902 return 0;
1903}
1904
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001905static int run_tracer_selftest(struct tracer *type)
1906{
1907 struct trace_array *tr = &global_trace;
1908 struct tracer *saved_tracer = tr->current_trace;
1909 int ret;
1910
1911 if (!type->selftest || tracing_selftest_disabled)
1912 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001913
1914 /*
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001915 * If a tracer registers early in boot up (before scheduling is
1916 * initialized and such), then do not run its selftests yet.
1917 * Instead, run it a little later in the boot process.
1918 */
1919 if (!selftests_can_run)
1920 return save_selftest(type);
1921
Steven Rostedt (VMware)ee666a12021-03-01 10:49:35 -05001922 if (!tracing_is_on()) {
1923 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
1924 type->name);
1925 return 0;
1926 }
1927
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001928 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001929 * Run a selftest on this tracer.
1930 * Here we reset the trace buffer, and set the current
1931 * tracer to be this tracer. The tracer can then run some
1932 * internal tracing to verify that everything is in order.
1933 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001934 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001935 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001936
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001937 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001938
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001939#ifdef CONFIG_TRACER_MAX_TRACE
1940 if (type->use_max_tr) {
1941 /* If we expanded the buffers, make sure the max is expanded too */
1942 if (ring_buffer_expanded)
1943 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1944 RING_BUFFER_ALL_CPUS);
1945 tr->allocated_snapshot = true;
1946 }
1947#endif
1948
1949 /* the test is responsible for initializing and enabling */
1950 pr_info("Testing tracer %s: ", type->name);
1951 ret = type->selftest(type, tr);
1952 /* the test is responsible for resetting too */
1953 tr->current_trace = saved_tracer;
1954 if (ret) {
1955 printk(KERN_CONT "FAILED!\n");
1956 /* Add the warning after printing 'FAILED' */
1957 WARN_ON(1);
1958 return -1;
1959 }
1960 /* Only reset on passing, to avoid touching corrupted buffers */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001961 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001962
1963#ifdef CONFIG_TRACER_MAX_TRACE
1964 if (type->use_max_tr) {
1965 tr->allocated_snapshot = false;
1966
1967 /* Shrink the max buffer again */
1968 if (ring_buffer_expanded)
1969 ring_buffer_resize(tr->max_buffer.buffer, 1,
1970 RING_BUFFER_ALL_CPUS);
1971 }
1972#endif
1973
1974 printk(KERN_CONT "PASSED\n");
1975 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001976}
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001977
1978static __init int init_trace_selftests(void)
1979{
1980 struct trace_selftests *p, *n;
1981 struct tracer *t, **last;
1982 int ret;
1983
1984 selftests_can_run = true;
1985
1986 mutex_lock(&trace_types_lock);
1987
1988 if (list_empty(&postponed_selftests))
1989 goto out;
1990
1991 pr_info("Running postponed tracer tests:\n");
1992
Steven Rostedt (VMware)78041c02020-02-20 15:38:01 -05001993 tracing_selftest_running = true;
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001994 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
Anders Roxell6fc21712018-11-30 15:56:22 +01001995 /* This loop can take minutes when sanitizers are enabled, so
1996	 * let's make sure we allow RCU processing.
1997 */
1998 cond_resched();
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001999 ret = run_tracer_selftest(p->type);
2000 /* If the test fails, then warn and remove from available_tracers */
2001 if (ret < 0) {
2002 WARN(1, "tracer: %s failed selftest, disabling\n",
2003 p->type->name);
2004 last = &trace_types;
2005 for (t = trace_types; t; t = t->next) {
2006 if (t == p->type) {
2007 *last = t->next;
2008 break;
2009 }
2010 last = &t->next;
2011 }
2012 }
2013 list_del(&p->list);
2014 kfree(p);
2015 }
Steven Rostedt (VMware)78041c02020-02-20 15:38:01 -05002016 tracing_selftest_running = false;
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04002017
2018 out:
2019 mutex_unlock(&trace_types_lock);
2020
2021 return 0;
2022}
Steven Rostedtb9ef0322017-05-17 11:14:35 -04002023core_initcall(init_trace_selftests);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05002024#else
2025static inline int run_tracer_selftest(struct tracer *type)
2026{
2027 return 0;
2028}
2029#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002030
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04002031static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2032
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08002033static void __init apply_trace_boot_options(void);
2034
Steven Rostedt4fcdae82008-05-12 21:21:00 +02002035/**
2036 * register_tracer - register a tracer with the ftrace system.
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002037 * @type: the plugin for the tracer
Steven Rostedt4fcdae82008-05-12 21:21:00 +02002038 *
2039 * Register a new plugin tracer.
2040 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08002041int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002042{
2043 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002044 int ret = 0;
2045
2046 if (!type->name) {
2047 pr_info("Tracer must have a name\n");
2048 return -1;
2049 }
2050
Dan Carpenter24a461d2010-07-10 12:06:44 +02002051 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08002052 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2053 return -1;
2054 }
2055
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05002056 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11002057 pr_warn("Can not register tracer %s due to lockdown\n",
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05002058 type->name);
2059 return -EPERM;
2060 }
2061
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002062 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01002063
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01002064 tracing_selftest_running = true;
2065
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002066 for (t = trace_types; t; t = t->next) {
2067 if (strcmp(type->name, t->name) == 0) {
2068 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08002069 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002070 type->name);
2071 ret = -1;
2072 goto out;
2073 }
2074 }
2075
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01002076 if (!type->set_flag)
2077 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08002078 if (!type->flags) {
2079 /*allocate a dummy tracer_flags*/
2080 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08002081 if (!type->flags) {
2082 ret = -ENOMEM;
2083 goto out;
2084 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08002085 type->flags->val = 0;
2086 type->flags->opts = dummy_tracer_opt;
2087 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01002088 if (!type->flags->opts)
2089 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01002090
Chunyu Hud39cdd22016-03-08 21:37:01 +08002091 /* store the tracer for __set_tracer_option */
2092 type->flags->trace = type;
2093
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05002094 ret = run_tracer_selftest(type);
2095 if (ret < 0)
2096 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02002097
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002098 type->next = trace_types;
2099 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04002100 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02002101
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002102 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01002103 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002104 mutex_unlock(&trace_types_lock);
2105
Steven Rostedtdac74942009-02-05 01:13:38 -05002106 if (ret || !default_bootup_tracer)
2107 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05002108
Li Zefanee6c2c12009-09-18 14:06:47 +08002109 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05002110 goto out_unlock;
2111
2112 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2113 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05002114 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05002115 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08002116
2117 apply_trace_boot_options();
2118
Steven Rostedtdac74942009-02-05 01:13:38 -05002119 /* disable other selftests, since this will break it. */
Masami Hiramatsu60efe212020-12-08 17:54:09 +09002120 disable_tracing_selftest("running a tracer");
Steven Rostedtdac74942009-02-05 01:13:38 -05002121
2122 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002123 return ret;
2124}
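/*
 * Illustrative sketch of registering a minimal tracer (not part of this
 * file; the "my_tracer" names are made up).  Since register_tracer() is
 * __init, registration has to happen during boot, e.g. from an
 * initcall:
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		// arm whatever hooks this tracer needs
 *		return 0;
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *		// undo my_tracer_init()
 *	}
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int my_tracer_register(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(my_tracer_register);
 */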
2125
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002126static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04002127{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002128 struct trace_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04002129
Hiraku Toyookaa5416412012-12-19 16:02:34 +09002130 if (!buffer)
2131 return;
2132
Steven Rostedtf6339032009-09-04 12:35:16 -04002133 ring_buffer_record_disable(buffer);
2134
2135 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08002136 synchronize_rcu();
Steven Rostedt68179682012-05-08 20:57:53 -04002137 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04002138
2139 ring_buffer_record_enable(buffer);
2140}
2141
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002142void tracing_reset_online_cpus(struct array_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02002143{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002144 struct trace_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02002145
Hiraku Toyookaa5416412012-12-19 16:02:34 +09002146 if (!buffer)
2147 return;
2148
Steven Rostedt621968c2009-09-04 12:02:35 -04002149 ring_buffer_record_disable(buffer);
2150
2151 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08002152 synchronize_rcu();
Steven Rostedt621968c2009-09-04 12:02:35 -04002153
Alexander Z Lam94571582013-08-02 18:36:16 -07002154 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02002155
Nicholas Pigginb23d7a5f2020-06-25 15:34:03 +10002156 ring_buffer_reset_online_cpus(buffer);
Steven Rostedt621968c2009-09-04 12:02:35 -04002157
2158 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02002159}
2160
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04002161/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002162void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04002163{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002164 struct trace_array *tr;
2165
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002166 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (VMware)065e63f2017-08-31 17:03:47 -04002167 if (!tr->clear_trace)
2168 continue;
2169 tr->clear_trace = false;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002170 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002171#ifdef CONFIG_TRACER_MAX_TRACE
2172 tracing_reset_online_cpus(&tr->max_buffer);
2173#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002174 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04002175}
2176
Paul Burton4030a6e2021-07-01 10:24:07 -07002177/*
2178 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
2179 * is the tgid last observed corresponding to pid=i.
2180 */
Joel Fernandesd914ba32017-06-26 19:01:55 -07002181static int *tgid_map;
2182
Paul Burton4030a6e2021-07-01 10:24:07 -07002183/* The maximum valid index into tgid_map. */
2184static size_t tgid_map_max;
2185
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002186#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002187#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01002188static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002189struct saved_cmdlines_buffer {
2190 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2191 unsigned *map_cmdline_to_pid;
2192 unsigned cmdline_num;
2193 int cmdline_idx;
2194 char *saved_cmdlines;
2195};
2196static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02002197
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002198static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002199{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002200 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2201}
2202
2203static inline void set_cmdline(int idx, const char *cmdline)
2204{
Tom Zanussi85f726a2019-03-05 10:12:00 -06002205 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002206}
2207
2208static int allocate_cmdlines_buffer(unsigned int val,
2209 struct saved_cmdlines_buffer *s)
2210{
Kees Cook6da2ec52018-06-12 13:55:00 -07002211 s->map_cmdline_to_pid = kmalloc_array(val,
2212 sizeof(*s->map_cmdline_to_pid),
2213 GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002214 if (!s->map_cmdline_to_pid)
2215 return -ENOMEM;
2216
Kees Cook6da2ec52018-06-12 13:55:00 -07002217 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002218 if (!s->saved_cmdlines) {
2219 kfree(s->map_cmdline_to_pid);
2220 return -ENOMEM;
2221 }
2222
2223 s->cmdline_idx = 0;
2224 s->cmdline_num = val;
2225 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2226 sizeof(s->map_pid_to_cmdline));
2227 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2228 val * sizeof(*s->map_cmdline_to_pid));
2229
2230 return 0;
2231}
2232
2233static int trace_create_savedcmd(void)
2234{
2235 int ret;
2236
Namhyung Kima6af8fb2014-06-10 16:11:35 +09002237 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002238 if (!savedcmd)
2239 return -ENOMEM;
2240
2241 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2242 if (ret < 0) {
2243 kfree(savedcmd);
2244 savedcmd = NULL;
2245 return -ENOMEM;
2246 }
2247
2248 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002249}
2250
Carsten Emdeb5130b12009-09-13 01:43:07 +02002251int is_tracing_stopped(void)
2252{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002253 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02002254}
2255
Steven Rostedt0f048702008-11-05 16:05:44 -05002256/**
2257 * tracing_start - quick start of the tracer
2258 *
2259 * If tracing is enabled but was stopped by tracing_stop,
2260 * this will start the tracer back up.
2261 */
2262void tracing_start(void)
2263{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002264 struct trace_buffer *buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002265 unsigned long flags;
2266
2267 if (tracing_disabled)
2268 return;
2269
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002270 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2271 if (--global_trace.stop_count) {
2272 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05002273 /* Someone screwed up their debugging */
2274 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002275 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05002276 }
Steven Rostedt0f048702008-11-05 16:05:44 -05002277 goto out;
2278 }
2279
Steven Rostedta2f80712010-03-12 19:56:00 -05002280 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002281 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05002282
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002283 buffer = global_trace.array_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002284 if (buffer)
2285 ring_buffer_record_enable(buffer);
2286
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002287#ifdef CONFIG_TRACER_MAX_TRACE
2288 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002289 if (buffer)
2290 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002291#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002292
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002293 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002294
Steven Rostedt0f048702008-11-05 16:05:44 -05002295 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002296 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2297}
2298
2299static void tracing_start_tr(struct trace_array *tr)
2300{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002301 struct trace_buffer *buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002302 unsigned long flags;
2303
2304 if (tracing_disabled)
2305 return;
2306
2307 /* If global, we need to also start the max tracer */
2308 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2309 return tracing_start();
2310
2311 raw_spin_lock_irqsave(&tr->start_lock, flags);
2312
2313 if (--tr->stop_count) {
2314 if (tr->stop_count < 0) {
2315 /* Someone screwed up their debugging */
2316 WARN_ON_ONCE(1);
2317 tr->stop_count = 0;
2318 }
2319 goto out;
2320 }
2321
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002322 buffer = tr->array_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002323 if (buffer)
2324 ring_buffer_record_enable(buffer);
2325
2326 out:
2327 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002328}
2329
2330/**
2331 * tracing_stop - quick stop of the tracer
2332 *
2333 * Light weight way to stop tracing. Use in conjunction with
2334 * tracing_start.
2335 */
2336void tracing_stop(void)
2337{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002338 struct trace_buffer *buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002339 unsigned long flags;
2340
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002341 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2342 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05002343 goto out;
2344
Steven Rostedta2f80712010-03-12 19:56:00 -05002345 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002346 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002347
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002348 buffer = global_trace.array_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002349 if (buffer)
2350 ring_buffer_record_disable(buffer);
2351
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002352#ifdef CONFIG_TRACER_MAX_TRACE
2353 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002354 if (buffer)
2355 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002356#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002357
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002358 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002359
Steven Rostedt0f048702008-11-05 16:05:44 -05002360 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002361 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2362}
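/*
 * Illustrative pairing (not part of this file): kernel code that wants
 * to keep the buffers quiet around a noisy region can bracket it with
 * tracing_stop()/tracing_start().  The stop_count handling above makes
 * such pairs nest correctly.
 *
 *	tracing_stop();
 *	my_noisy_operation();	// made-up placeholder
 *	tracing_start();
 */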
2363
2364static void tracing_stop_tr(struct trace_array *tr)
2365{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002366 struct trace_buffer *buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002367 unsigned long flags;
2368
2369 /* If global, we need to also stop the max tracer */
2370 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2371 return tracing_stop();
2372
2373 raw_spin_lock_irqsave(&tr->start_lock, flags);
2374 if (tr->stop_count++)
2375 goto out;
2376
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002377 buffer = tr->array_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002378 if (buffer)
2379 ring_buffer_record_disable(buffer);
2380
2381 out:
2382 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002383}
2384
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002385static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002386{
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002387 unsigned tpid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002388
Joel Fernandeseaf260a2017-07-06 16:00:21 -07002389 /* treat recording of idle task as a success */
2390 if (!tsk->pid)
2391 return 1;
2392
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002393 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002394
2395 /*
2396 * It's not the end of the world if we don't get
2397 * the lock, but we also don't want to spin
2398 * nor do we want to disable interrupts,
2399 * so if we miss here, then better luck next time.
2400 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002401 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002402 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002403
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002404 idx = savedcmd->map_pid_to_cmdline[tpid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002405 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002406 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002407
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002408 savedcmd->map_pid_to_cmdline[tpid] = idx;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002409 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002410 }
2411
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002412 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002413 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002414
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002415 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002416
2417 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002418}
2419
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002420static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002421{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002422 unsigned map;
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002423 int tpid;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002424
Steven Rostedt4ca530852009-03-16 19:20:15 -04002425 if (!pid) {
2426 strcpy(comm, "<idle>");
2427 return;
2428 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002429
Steven Rostedt74bf4072010-01-25 15:11:53 -05002430 if (WARN_ON_ONCE(pid < 0)) {
2431 strcpy(comm, "<XXX>");
2432 return;
2433 }
2434
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002435 tpid = pid & (PID_MAX_DEFAULT - 1);
2436 map = savedcmd->map_pid_to_cmdline[tpid];
2437 if (map != NO_CMDLINE_MAP) {
2438 tpid = savedcmd->map_cmdline_to_pid[map];
2439 if (tpid == pid) {
2440 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2441 return;
2442 }
Steven Rostedt4ca530852009-03-16 19:20:15 -04002443 }
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002444 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002445}
2446
2447void trace_find_cmdline(int pid, char comm[])
2448{
2449 preempt_disable();
2450 arch_spin_lock(&trace_cmdline_lock);
2451
2452 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002453
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002454 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02002455 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002456}
2457
Paul Burton4030a6e2021-07-01 10:24:07 -07002458static int *trace_find_tgid_ptr(int pid)
2459{
2460 /*
2461 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2462 * if we observe a non-NULL tgid_map then we also observe the correct
2463 * tgid_map_max.
2464 */
2465 int *map = smp_load_acquire(&tgid_map);
2466
2467 if (unlikely(!map || pid > tgid_map_max))
2468 return NULL;
2469
2470 return &map[pid];
2471}
2472
Joel Fernandesd914ba32017-06-26 19:01:55 -07002473int trace_find_tgid(int pid)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002474{
Paul Burton4030a6e2021-07-01 10:24:07 -07002475 int *ptr = trace_find_tgid_ptr(pid);
Joel Fernandesd914ba32017-06-26 19:01:55 -07002476
Paul Burton4030a6e2021-07-01 10:24:07 -07002477 return ptr ? *ptr : 0;
Joel Fernandesd914ba32017-06-26 19:01:55 -07002478}
2479
2480static int trace_save_tgid(struct task_struct *tsk)
2481{
Paul Burton4030a6e2021-07-01 10:24:07 -07002482 int *ptr;
2483
Joel Fernandesbd45d342017-07-06 16:00:22 -07002484 /* treat recording of idle task as a success */
2485 if (!tsk->pid)
2486 return 1;
2487
Paul Burton4030a6e2021-07-01 10:24:07 -07002488 ptr = trace_find_tgid_ptr(tsk->pid);
2489 if (!ptr)
Joel Fernandesd914ba32017-06-26 19:01:55 -07002490 return 0;
2491
Paul Burton4030a6e2021-07-01 10:24:07 -07002492 *ptr = tsk->tgid;
Joel Fernandesd914ba32017-06-26 19:01:55 -07002493 return 1;
2494}
2495
2496static bool tracing_record_taskinfo_skip(int flags)
2497{
2498 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2499 return true;
Joel Fernandesd914ba32017-06-26 19:01:55 -07002500 if (!__this_cpu_read(trace_taskinfo_save))
2501 return true;
2502 return false;
2503}
2504
2505/**
2506 * tracing_record_taskinfo - record the task info of a task
2507 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002508 * @task: task to record
2509 * @flags: TRACE_RECORD_CMDLINE for recording comm
2510 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002511 */
2512void tracing_record_taskinfo(struct task_struct *task, int flags)
2513{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002514 bool done;
2515
Joel Fernandesd914ba32017-06-26 19:01:55 -07002516 if (tracing_record_taskinfo_skip(flags))
2517 return;
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002518
2519 /*
2520 * Record as much task information as possible. If some fail, continue
2521 * to try to record the others.
2522 */
2523 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2524 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2525
2526 /* If recording any information failed, retry again soon. */
2527 if (!done)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002528 return;
2529
Joel Fernandesd914ba32017-06-26 19:01:55 -07002530 __this_cpu_write(trace_taskinfo_save, false);
2531}
2532
2533/**
2534 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2535 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002536 * @prev: previous task during sched_switch
2537 * @next: next task during sched_switch
2538 * @flags: TRACE_RECORD_CMDLINE for recording comm
2539 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002540 */
2541void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2542 struct task_struct *next, int flags)
2543{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002544 bool done;
2545
Joel Fernandesd914ba32017-06-26 19:01:55 -07002546 if (tracing_record_taskinfo_skip(flags))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002547 return;
2548
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002549 /*
2550 * Record as much task information as possible. If some fail, continue
2551 * to try to record the others.
2552 */
2553 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2554 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2555 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2556 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
Joel Fernandesd914ba32017-06-26 19:01:55 -07002557
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002558 /* If recording any information failed, retry again soon. */
2559 if (!done)
Joel Fernandesd914ba32017-06-26 19:01:55 -07002560 return;
2561
2562 __this_cpu_write(trace_taskinfo_save, false);
2563}
2564
2565/* Helpers to record a specific task information */
2566void tracing_record_cmdline(struct task_struct *task)
2567{
2568 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2569}
2570
2571void tracing_record_tgid(struct task_struct *task)
2572{
2573 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002574}
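/*
 * Illustrative sketch (assumed usage, not taken from this file): a tracer
 * that emits events referencing a task's pid would typically record the
 * comm/tgid mapping first, so that the saved_cmdlines/saved_tgids files
 * can resolve the pid later when the buffer is read, e.g.:
 *
 *	tracing_record_cmdline(current);
 *	tracing_record_tgid(current);
 *	... write the event that refers to current->pid ...
 */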
2575
Steven Rostedt (VMware)af0009f2017-03-16 11:01:06 -04002576/*
2577 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2578 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2579 * simplifies those functions and keeps them in sync.
2580 */
2581enum print_line_t trace_handle_return(struct trace_seq *s)
2582{
2583 return trace_seq_has_overflowed(s) ?
2584 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2585}
2586EXPORT_SYMBOL_GPL(trace_handle_return);
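/*
 * Illustrative sketch (assumed usage, not taken from this file): an event's
 * print callback typically ends with trace_handle_return() so that a
 * trace_seq overflow is reported consistently:
 *
 *	static enum print_line_t
 *	example_print(struct trace_iterator *iter, int flags,
 *		      struct trace_event *event)
 *	{
 *		struct trace_seq *s = &iter->seq;
 *
 *		trace_seq_printf(s, "example event\n");
 *		return trace_handle_return(s);
 *	}
 */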
2587
Thomas Gleixner54357f02021-08-10 15:26:25 +02002588static unsigned short migration_disable_value(void)
2589{
2590#if defined(CONFIG_SMP)
2591 return current->migration_disabled;
2592#else
2593 return 0;
2594#endif
2595}
2596
Sebastian Andrzej Siewior0c020062021-01-25 20:45:09 +01002597unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002598{
Sebastian Andrzej Siewior0c020062021-01-25 20:45:09 +01002599 unsigned int trace_flags = irqs_status;
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002600 unsigned int pc;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002601
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002602 pc = preempt_count();
2603
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002604 if (pc & NMI_MASK)
2605 trace_flags |= TRACE_FLAG_NMI;
2606 if (pc & HARDIRQ_MASK)
2607 trace_flags |= TRACE_FLAG_HARDIRQ;
Sebastian Andrzej Siewiorfe427882021-01-25 20:45:10 +01002608 if (in_serving_softirq())
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002609 trace_flags |= TRACE_FLAG_SOFTIRQ;
Sebastian Andrzej Siewior289e7b02021-12-13 11:08:53 +01002610 if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2611 trace_flags |= TRACE_FLAG_BH_OFF;
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002612
2613 if (tif_need_resched())
2614 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2615 if (test_preempt_need_resched())
2616 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
Thomas Gleixner54357f02021-08-10 15:26:25 +02002617 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2618 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002619}
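/*
 * Layout of the trace_ctx word built above: bits 0-3 hold the preempt
 * count (capped at 15), bits 4-7 hold the migration disable count (also
 * capped at 15), and bits 16 and up hold the TRACE_FLAG_* bits.
 */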
2620
Steven Rostedte77405a2009-09-02 14:17:06 -04002621struct ring_buffer_event *
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002622trace_buffer_lock_reserve(struct trace_buffer *buffer,
Steven Rostedte77405a2009-09-02 14:17:06 -04002623 int type,
2624 unsigned long len,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002625 unsigned int trace_ctx)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002626{
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002627 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002628}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002629
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002630DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2631DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2632static int trace_buffered_event_ref;
2633
2634/**
2635 * trace_buffered_event_enable - enable buffering events
2636 *
2637 * When events are being filtered, it is quicker to write the event
2638 * data into a temporary buffer if there is a good chance that it
2639 * will not be committed. Discarding an event from the ring buffer
2640 * is not as fast as committing one, and is much slower than copying
2641 * the data and then committing it.
2642 *
2643 * When an event is to be filtered, allocate per CPU buffers to
2644 * write the event data into. If the event is filtered and discarded,
2645 * it is simply dropped; otherwise, the entire data is committed
2646 * in one shot.
2647 */
2648void trace_buffered_event_enable(void)
2649{
2650 struct ring_buffer_event *event;
2651 struct page *page;
2652 int cpu;
2653
2654 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2655
2656 if (trace_buffered_event_ref++)
2657 return;
2658
2659 for_each_tracing_cpu(cpu) {
2660 page = alloc_pages_node(cpu_to_node(cpu),
2661 GFP_KERNEL | __GFP_NORETRY, 0);
2662 if (!page)
2663 goto failed;
2664
2665 event = page_address(page);
2666 memset(event, 0, sizeof(*event));
2667
2668 per_cpu(trace_buffered_event, cpu) = event;
2669
2670 preempt_disable();
2671 if (cpu == smp_processor_id() &&
Xianting Tianb427e762020-08-13 19:28:03 +08002672 __this_cpu_read(trace_buffered_event) !=
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002673 per_cpu(trace_buffered_event, cpu))
2674 WARN_ON_ONCE(1);
2675 preempt_enable();
2676 }
2677
2678 return;
2679 failed:
2680 trace_buffered_event_disable();
2681}
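/*
 * Illustrative sketch (assumed caller, not part of this file): the event
 * filtering code pairs these helpers under event_mutex, roughly:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();
 *	... install the filter on the event file ...
 *	mutex_unlock(&event_mutex);
 *
 * and calls trace_buffered_event_disable() (again under event_mutex)
 * when the filter is removed.
 */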
2682
2683static void enable_trace_buffered_event(void *data)
2684{
2685 /* Probably not needed, but do it anyway */
2686 smp_rmb();
2687 this_cpu_dec(trace_buffered_event_cnt);
2688}
2689
2690static void disable_trace_buffered_event(void *data)
2691{
2692 this_cpu_inc(trace_buffered_event_cnt);
2693}
2694
2695/**
2696 * trace_buffered_event_disable - disable buffering events
2697 *
2698 * When a filter is removed, it is faster to not use the buffered
2699 * events, and to commit directly into the ring buffer. Free up
2700 * the temp buffers when there are no more users. This requires
2701 * special synchronization with current events.
2702 */
2703void trace_buffered_event_disable(void)
2704{
2705 int cpu;
2706
2707 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2708
2709 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2710 return;
2711
2712 if (--trace_buffered_event_ref)
2713 return;
2714
2715 preempt_disable();
2716 /* For each CPU, set the buffer as used. */
2717 smp_call_function_many(tracing_buffer_mask,
2718 disable_trace_buffered_event, NULL, 1);
2719 preempt_enable();
2720
2721 /* Wait for all current users to finish */
Paul E. McKenney74401722018-11-06 18:44:52 -08002722 synchronize_rcu();
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002723
2724 for_each_tracing_cpu(cpu) {
2725 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2726 per_cpu(trace_buffered_event, cpu) = NULL;
2727 }
2728 /*
2729 * Make sure trace_buffered_event is NULL before clearing
2730 * trace_buffered_event_cnt.
2731 */
2732 smp_wmb();
2733
2734 preempt_disable();
2735 /* Do the work on each cpu */
2736 smp_call_function_many(tracing_buffer_mask,
2737 enable_trace_buffered_event, NULL, 1);
2738 preempt_enable();
2739}
2740
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002741static struct trace_buffer *temp_buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002742
Steven Rostedtef5580d2009-02-27 19:38:04 -05002743struct ring_buffer_event *
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002744trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002745 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002746 int type, unsigned long len,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002747 unsigned int trace_ctx)
Steven Rostedtccb469a2012-08-02 10:32:10 -04002748{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002749 struct ring_buffer_event *entry;
Steven Rostedt (VMware)b94bc802021-03-16 12:41:05 -04002750 struct trace_array *tr = trace_file->tr;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002751 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002752
Steven Rostedt (VMware)b94bc802021-03-16 12:41:05 -04002753 *current_rb = tr->array_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002754
Steven Rostedt (VMware)b94bc802021-03-16 12:41:05 -04002755 if (!tr->no_filter_buffering_ref &&
Steven Rostedt (VMware)6c536d72021-11-29 21:39:47 -05002756 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2757 preempt_disable_notrace();
Steven Rostedt (VMware)8f0901c2021-06-09 18:04:59 -04002758 /*
2759 * Filtering is on, so try to use the per cpu buffer first.
2760 * This buffer will simulate a ring_buffer_event,
2761 * where the type_len is zero and the array[0] will
2762 * hold the full length.
 2763 * (see include/linux/ring_buffer.h for details on
2764 * how the ring_buffer_event is structured).
2765 *
2766 * Using a temp buffer during filtering and copying it
2767 * on a matched filter is quicker than writing directly
2768 * into the ring buffer and then discarding it when
2769 * it doesn't match. That is because the discard
2770 * requires several atomic operations to get right.
 2771 * Copying on a match and doing nothing on a failed match
 2772 * is still quicker than writing directly into the ring buffer
 2773 * and having to discard the event on a failed match.
2774 */
Steven Rostedt (VMware)6c536d72021-11-29 21:39:47 -05002775 if ((entry = __this_cpu_read(trace_buffered_event))) {
2776 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
Steven Rostedt (VMware)faa76a62021-06-09 18:04:58 -04002777
Steven Rostedt (VMware)6c536d72021-11-29 21:39:47 -05002778 val = this_cpu_inc_return(trace_buffered_event_cnt);
Steven Rostedt (VMware)8f0901c2021-06-09 18:04:59 -04002779
Steven Rostedt (VMware)6c536d72021-11-29 21:39:47 -05002780 /*
2781 * Preemption is disabled, but interrupts and NMIs
2782 * can still come in now. If that happens after
2783 * the above increment, then it will have to go
2784 * back to the old method of allocating the event
2785 * on the ring buffer, and if the filter fails, it
2786 * will have to call ring_buffer_discard_commit()
2787 * to remove it.
2788 *
2789 * Need to also check the unlikely case that the
2790 * length is bigger than the temp buffer size.
2791 * If that happens, then the reserve is pretty much
2792 * guaranteed to fail, as the ring buffer currently
2793 * only allows events less than a page. But that may
2794 * change in the future, so let the ring buffer reserve
2795 * handle the failure in that case.
2796 */
2797 if (val == 1 && likely(len <= max_len)) {
2798 trace_event_setup(entry, type, trace_ctx);
2799 entry->array[0] = len;
2800 /* Return with preemption disabled */
2801 return entry;
2802 }
2803 this_cpu_dec(trace_buffered_event_cnt);
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002804 }
Steven Rostedt (VMware)6c536d72021-11-29 21:39:47 -05002805 /* __trace_buffer_lock_reserve() disables preemption */
2806 preempt_enable_notrace();
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002807 }
2808
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002809 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2810 trace_ctx);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002811 /*
 2812 * If tracing is off, but we have triggers enabled,
2813 * we still need to look at the event data. Use the temp_buffer
Qiujun Huang906695e2020-10-31 16:57:14 +08002814 * to store the trace event for the trigger to use. It's recursion
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002815 * safe and will not be recorded anywhere.
2816 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002817 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002818 *current_rb = temp_buffer;
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002819 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2820 trace_ctx);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002821 }
2822 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002823}
2824EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2825
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002826static DEFINE_SPINLOCK(tracepoint_iter_lock);
2827static DEFINE_MUTEX(tracepoint_printk_mutex);
2828
2829static void output_printk(struct trace_event_buffer *fbuffer)
2830{
2831 struct trace_event_call *event_call;
Masami Hiramatsud8d0c242020-01-11 01:05:18 +09002832 struct trace_event_file *file;
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002833 struct trace_event *event;
2834 unsigned long flags;
2835 struct trace_iterator *iter = tracepoint_print_iter;
2836
2837 /* We should never get here if iter is NULL */
2838 if (WARN_ON_ONCE(!iter))
2839 return;
2840
2841 event_call = fbuffer->trace_file->event_call;
2842 if (!event_call || !event_call->event.funcs ||
2843 !event_call->event.funcs->trace)
2844 return;
2845
Masami Hiramatsud8d0c242020-01-11 01:05:18 +09002846 file = fbuffer->trace_file;
2847 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2848 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2849 !filter_match_preds(file->filter, fbuffer->entry)))
2850 return;
2851
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002852 event = &fbuffer->trace_file->event_call->event;
2853
2854 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2855 trace_seq_init(&iter->seq);
2856 iter->ent = fbuffer->entry;
2857 event_call->event.funcs->trace(iter, 0, event);
2858 trace_seq_putc(&iter->seq, 0);
2859 printk("%s", iter->seq.buffer);
2860
2861 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2862}
2863
2864int tracepoint_printk_sysctl(struct ctl_table *table, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02002865 void *buffer, size_t *lenp,
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002866 loff_t *ppos)
2867{
2868 int save_tracepoint_printk;
2869 int ret;
2870
2871 mutex_lock(&tracepoint_printk_mutex);
2872 save_tracepoint_printk = tracepoint_printk;
2873
2874 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2875
2876 /*
 2877 * This will force an early exit, as tracepoint_printk
 2878 * is always zero when tracepoint_print_iter is not allocated.
2879 */
2880 if (!tracepoint_print_iter)
2881 tracepoint_printk = 0;
2882
2883 if (save_tracepoint_printk == tracepoint_printk)
2884 goto out;
2885
2886 if (tracepoint_printk)
2887 static_key_enable(&tracepoint_printk_key.key);
2888 else
2889 static_key_disable(&tracepoint_printk_key.key);
2890
2891 out:
2892 mutex_unlock(&tracepoint_printk_mutex);
2893
2894 return ret;
2895}
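/*
 * Note (assumed administrator usage): tracepoint_printk can be toggled at
 * run time by writing 0 or 1 to /proc/sys/kernel/tracepoint_printk (the
 * sysctl handled above), or it can be enabled at boot with the tp_printk
 * command line option.
 */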
2896
2897void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2898{
Pingfan Liu6c34df62021-08-14 11:45:38 +08002899 enum event_trigger_type tt = ETT_NONE;
2900 struct trace_event_file *file = fbuffer->trace_file;
2901
2902 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2903 fbuffer->entry, &tt))
2904 goto discard;
2905
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002906 if (static_key_false(&tracepoint_printk_key.key))
2907 output_printk(fbuffer);
2908
Tingwei Zhang8ab7a2b2020-10-05 10:13:14 +03002909 if (static_branch_unlikely(&trace_event_exports_enabled))
2910 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
Pingfan Liu6c34df62021-08-14 11:45:38 +08002911
2912 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2913 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2914
2915discard:
2916 if (tt)
2917 event_triggers_post_call(file, tt);
2918
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002919}
2920EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2921
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002922/*
2923 * Skip 3:
2924 *
2925 * trace_buffer_unlock_commit_regs()
2926 * trace_event_buffer_commit()
2927 * trace_event_raw_event_xxx()
Rohit Visavalia13cf9122018-01-29 15:11:26 +05302928 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002929# define STACK_SKIP 3
2930
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002931void trace_buffer_unlock_commit_regs(struct trace_array *tr,
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002932 struct trace_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002933 struct ring_buffer_event *event,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002934 unsigned int trace_ctx,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002935 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002936{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002937 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002938
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002939 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002940 * If regs is not set, then skip the necessary functions.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002941 * Note, we can still get here via blktrace, wakeup tracer
2942 * and mmiotrace, but that's ok if they lose a function or
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002943 * two. They are not that meaningful.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002944 */
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002945 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2946 ftrace_trace_userstack(tr, buffer, trace_ctx);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002947}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002948
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002949/*
2950 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2951 */
2952void
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002953trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002954 struct ring_buffer_event *event)
2955{
2956 __buffer_unlock_commit(buffer, event);
2957}
2958
Ingo Molnare309b412008-05-12 21:20:51 +02002959void
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002960trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2961 parent_ip, unsigned int trace_ctx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002962{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002963 struct trace_event_call *call = &event_function;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002964 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002965 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002966 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002967
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002968 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002969 trace_ctx);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002970 if (!event)
2971 return;
2972 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002973 entry->ip = ip;
2974 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002975
Chunyan Zhang478409d2016-11-21 15:57:18 +08002976 if (!call_filter_check_discard(call, entry, buffer, event)) {
Tingwei Zhang8438f522020-10-05 10:13:13 +03002977 if (static_branch_unlikely(&trace_function_exports_enabled))
2978 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002979 __buffer_unlock_commit(buffer, event);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002980 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002981}
2982
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002983#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002984
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002985/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2986#define FTRACE_KSTACK_NESTING 4
2987
2988#define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2989
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002990struct ftrace_stack {
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002991 unsigned long calls[FTRACE_KSTACK_ENTRIES];
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002992};
2993
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002994
2995struct ftrace_stacks {
2996 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2997};
2998
2999static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003000static DEFINE_PER_CPU(int, ftrace_stack_reserve);
3001
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003002static void __ftrace_trace_stack(struct trace_buffer *buffer,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003003 unsigned int trace_ctx,
3004 int skip, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02003005{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003006 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003007 struct ring_buffer_event *event;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02003008 unsigned int size, nr_entries;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02003009 struct ftrace_stack *fstack;
Steven Rostedt777e2082008-09-29 23:02:42 -04003010 struct stack_entry *entry;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02003011 int stackidx;
Ingo Molnar86387f72008-05-12 21:20:51 +02003012
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003013 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05003014 * Add one, for this function and the call to save_stack_trace()
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04003015 * If regs is set, then these functions will not be in the way.
3016 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05003017#ifndef CONFIG_UNWINDER_ORC
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04003018 if (!regs)
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02003019 skip++;
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05003020#endif
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04003021
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003022 preempt_disable_notrace();
3023
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02003024 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3025
3026 /* This should never happen. If it does, yell once and skip */
Qiujun Huang906695e2020-10-31 16:57:14 +08003027 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02003028 goto out;
3029
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003030 /*
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02003031 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3032 * interrupt will either see the value pre increment or post
3033 * increment. If the interrupt happens pre increment it will have
3034 * restored the counter when it returns. We just need a barrier to
3035 * keep gcc from moving things around.
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003036 */
3037 barrier();
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003038
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02003039 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02003040 size = ARRAY_SIZE(fstack->calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003041
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02003042 if (regs) {
3043 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3044 size, skip);
3045 } else {
3046 nr_entries = stack_trace_save(fstack->calls, size, skip);
3047 }
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003048
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02003049 size = nr_entries * sizeof(unsigned long);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003050 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
Steven Rostedt (VMware)9deb1932021-04-01 13:54:40 -04003051 (sizeof(*entry) - sizeof(entry->caller)) + size,
3052 trace_ctx);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003053 if (!event)
3054 goto out;
3055 entry = ring_buffer_event_data(event);
3056
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02003057 memcpy(&entry->caller, fstack->calls, size);
3058 entry->size = nr_entries;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003059
Tom Zanussif306cc82013-10-24 08:34:17 -05003060 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003061 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003062
3063 out:
3064 /* Again, don't let gcc optimize things here */
3065 barrier();
Shan Wei82146522012-11-19 13:21:01 +08003066 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003067 preempt_enable_notrace();
3068
Ingo Molnarf0a920d2008-05-12 21:20:47 +02003069}
3070
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003071static inline void ftrace_trace_stack(struct trace_array *tr,
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003072 struct trace_buffer *buffer,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003073 unsigned int trace_ctx,
3074 int skip, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05003075{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003076 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05003077 return;
3078
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003079 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05003080}
3081
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003082void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3083 int skip)
Steven Rostedt38697052008-10-01 13:14:09 -04003084{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003085 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04003086
3087 if (rcu_is_watching()) {
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003088 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04003089 return;
3090 }
3091
3092 /*
3093 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3094 * but if the above rcu_is_watching() failed, then the NMI
3095 * triggered someplace critical, and rcu_irq_enter() should
3096 * not be called from NMI.
3097 */
3098 if (unlikely(in_nmi()))
3099 return;
3100
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04003101 rcu_irq_enter_irqson();
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003102 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04003103 rcu_irq_exit_irqson();
Steven Rostedt38697052008-10-01 13:14:09 -04003104}
3105
Steven Rostedt03889382009-12-11 09:48:22 -05003106/**
3107 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003108 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05003109 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003110void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05003111{
Steven Rostedt03889382009-12-11 09:48:22 -05003112 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05003113 return;
Steven Rostedt03889382009-12-11 09:48:22 -05003114
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05003115#ifndef CONFIG_UNWINDER_ORC
3116 /* Skip 1 to skip this function. */
3117 skip++;
3118#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003119 __ftrace_trace_stack(global_trace.array_buffer.buffer,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003120 tracing_gen_ctx(), skip, NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05003121}
Nikolay Borisovda387e52018-10-17 09:51:43 +03003122EXPORT_SYMBOL_GPL(trace_dump_stack);
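/*
 * Illustrative sketch (assumed usage): a debugging site that wants the
 * current backtrace in the trace buffer rather than in dmesg can simply do:
 *
 *	trace_dump_stack(0);
 *
 * A non-zero skip drops that many helper frames from the top of the trace.
 */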
Steven Rostedt03889382009-12-11 09:48:22 -05003123
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003124#ifdef CONFIG_USER_STACKTRACE_SUPPORT
Steven Rostedt91e86e52010-11-10 12:56:12 +01003125static DEFINE_PER_CPU(int, user_stack_count);
3126
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003127static void
Steven Rostedt (VMware)bcee5272020-12-04 16:36:16 -05003128ftrace_trace_userstack(struct trace_array *tr,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003129 struct trace_buffer *buffer, unsigned int trace_ctx)
Török Edwin02b67512008-11-22 13:28:47 +02003130{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003131 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02003132 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02003133 struct userstack_entry *entry;
Török Edwin02b67512008-11-22 13:28:47 +02003134
Steven Rostedt (VMware)bcee5272020-12-04 16:36:16 -05003135 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02003136 return;
3137
Steven Rostedtb6345872010-03-12 20:03:30 -05003138 /*
 3139 * NMIs cannot handle page faults, even with fixups.
 3140 * Saving the user stack can (and often does) fault.
3141 */
3142 if (unlikely(in_nmi()))
3143 return;
3144
Steven Rostedt91e86e52010-11-10 12:56:12 +01003145 /*
3146 * prevent recursion, since the user stack tracing may
3147 * trigger other kernel events.
3148 */
3149 preempt_disable();
3150 if (__this_cpu_read(user_stack_count))
3151 goto out;
3152
3153 __this_cpu_inc(user_stack_count);
3154
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003155 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003156 sizeof(*entry), trace_ctx);
Török Edwin02b67512008-11-22 13:28:47 +02003157 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08003158 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02003159 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02003160
Steven Rostedt48659d32009-09-11 11:36:23 -04003161 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02003162 memset(&entry->caller, 0, sizeof(entry->caller));
3163
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02003164 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
Tom Zanussif306cc82013-10-24 08:34:17 -05003165 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003166 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01003167
Li Zefan1dbd1952010-12-09 15:47:56 +08003168 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01003169 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01003170 out:
3171 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02003172}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003173#else /* CONFIG_USER_STACKTRACE_SUPPORT */
Steven Rostedt (VMware)bcee5272020-12-04 16:36:16 -05003174static void ftrace_trace_userstack(struct trace_array *tr,
3175 struct trace_buffer *buffer,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003176 unsigned int trace_ctx)
Török Edwin02b67512008-11-22 13:28:47 +02003177{
Török Edwin02b67512008-11-22 13:28:47 +02003178}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003179#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
Török Edwin02b67512008-11-22 13:28:47 +02003180
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02003181#endif /* CONFIG_STACKTRACE */
3182
Yordan Karadzhov (VMware)c6587972021-04-15 21:18:52 +03003183static inline void
3184func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3185 unsigned long long delta)
3186{
3187 entry->bottom_delta_ts = delta & U32_MAX;
3188 entry->top_delta_ts = (delta >> 32);
3189}
3190
3191void trace_last_func_repeats(struct trace_array *tr,
3192 struct trace_func_repeats *last_info,
3193 unsigned int trace_ctx)
3194{
3195 struct trace_buffer *buffer = tr->array_buffer.buffer;
3196 struct func_repeats_entry *entry;
3197 struct ring_buffer_event *event;
3198 u64 delta;
3199
3200 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3201 sizeof(*entry), trace_ctx);
3202 if (!event)
3203 return;
3204
3205 delta = ring_buffer_event_time_stamp(buffer, event) -
3206 last_info->ts_last_call;
3207
3208 entry = ring_buffer_event_data(event);
3209 entry->ip = last_info->ip;
3210 entry->parent_ip = last_info->parent_ip;
3211 entry->count = last_info->count;
3212 func_repeats_set_delta_ts(entry, delta);
3213
3214 __buffer_unlock_commit(buffer, event);
3215}
3216
Steven Rostedt07d777f2011-09-22 14:01:55 -04003217/* created for use with alloc_percpu */
3218struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003219 int nesting;
3220 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04003221};
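/*
 * The four per CPU buffers above allow one level of nesting each
 * (normal, softirq, irq and NMI context, mirroring FTRACE_KSTACK_NESTING
 * above); get_trace_buf()/put_trace_buf() track the depth via ->nesting.
 */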
3222
Naveen N. Raof28439d2021-12-23 16:04:39 +05303223static struct trace_buffer_struct __percpu *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003224
3225/*
Qiujun Huang2b5894c2020-10-29 23:05:54 +08003226 * This allows for lockless recording. If we're nested too deeply, then
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003227 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04003228 */
3229static char *get_trace_buf(void)
3230{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003231 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04003232
Naveen N. Rao823e6702021-12-23 16:04:38 +05303233 if (!trace_percpu_buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003234 return NULL;
3235
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04003236 buffer->nesting++;
3237
3238 /* Interrupts must see nesting incremented before we use the buffer */
3239 barrier();
Qiujun Huangc1acb4a2020-10-30 00:19:05 +08003240 return &buffer->buffer[buffer->nesting - 1][0];
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003241}
3242
3243static void put_trace_buf(void)
3244{
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04003245 /* Don't let the decrement of nesting leak before this */
3246 barrier();
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003247 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04003248}
3249
3250static int alloc_percpu_trace_buffer(void)
3251{
Naveen N. Raof28439d2021-12-23 16:04:39 +05303252 struct trace_buffer_struct __percpu *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003253
Steven Rostedt (VMware)38ce2a92020-08-06 12:46:49 -04003254 if (trace_percpu_buffer)
3255 return 0;
3256
Steven Rostedt07d777f2011-09-22 14:01:55 -04003257 buffers = alloc_percpu(struct trace_buffer_struct);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05003258 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003259 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003260
3261 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003262 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003263}
3264
Steven Rostedt81698832012-10-11 10:15:05 -04003265static int buffers_allocated;
3266
Steven Rostedt07d777f2011-09-22 14:01:55 -04003267void trace_printk_init_buffers(void)
3268{
Steven Rostedt07d777f2011-09-22 14:01:55 -04003269 if (buffers_allocated)
3270 return;
3271
3272 if (alloc_percpu_trace_buffer())
3273 return;
3274
Steven Rostedt2184db42014-05-28 13:14:40 -04003275 /* trace_printk() is for debug use only. Don't use it in production. */
3276
Joe Perchesa395d6a2016-03-22 14:28:09 -07003277 pr_warn("\n");
3278 pr_warn("**********************************************************\n");
3279 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3280 pr_warn("** **\n");
3281 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3282 pr_warn("** **\n");
3283 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3284 pr_warn("** unsafe for production use. **\n");
3285 pr_warn("** **\n");
3286 pr_warn("** If you see this message and you are not debugging **\n");
3287 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3288 pr_warn("** **\n");
3289 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3290 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04003291
Steven Rostedtb382ede62012-10-10 21:44:34 -04003292 /* Expand the buffers to set size */
3293 tracing_update_buffers();
3294
Steven Rostedt07d777f2011-09-22 14:01:55 -04003295 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04003296
3297 /*
3298 * trace_printk_init_buffers() can be called by modules.
3299 * If that happens, then we need to start cmdline recording
 3300 * directly here. If the global_trace.array_buffer.buffer is already
 3301 * allocated, then this was called by module code.
3302 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003303 if (global_trace.array_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04003304 tracing_start_cmdline_record();
3305}
Divya Indif45d1222019-03-20 11:28:51 -07003306EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
Steven Rostedt81698832012-10-11 10:15:05 -04003307
3308void trace_printk_start_comm(void)
3309{
3310 /* Start tracing comms if trace printk is set */
3311 if (!buffers_allocated)
3312 return;
3313 tracing_start_cmdline_record();
3314}
3315
3316static void trace_printk_start_stop_comm(int enabled)
3317{
3318 if (!buffers_allocated)
3319 return;
3320
3321 if (enabled)
3322 tracing_start_cmdline_record();
3323 else
3324 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003325}
3326
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003327/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003328 * trace_vbprintk - write binary msg to tracing buffer
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07003329 * @ip: The address of the caller
3330 * @fmt: The string format to write to the buffer
3331 * @args: Arguments for @fmt
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003332 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04003333int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003334{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003335 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003336 struct ring_buffer_event *event;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003337 struct trace_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003338 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003339 struct bprint_entry *entry;
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003340 unsigned int trace_ctx;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003341 char *tbuffer;
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003342 int len = 0, size;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003343
3344 if (unlikely(tracing_selftest_running || tracing_disabled))
3345 return 0;
3346
3347 /* Don't pollute graph traces with trace_vprintk internals */
3348 pause_graph_tracing();
3349
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003350 trace_ctx = tracing_gen_ctx();
Steven Rostedt5168ae52010-06-03 09:36:50 -04003351 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003352
Steven Rostedt07d777f2011-09-22 14:01:55 -04003353 tbuffer = get_trace_buf();
3354 if (!tbuffer) {
3355 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003356 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003357 }
3358
3359 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3360
3361 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Steven Rostedt (VMware)34423f22020-01-22 06:44:50 -05003362 goto out_put;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003363
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003364 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003365 buffer = tr->array_buffer.buffer;
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003366 ring_buffer_nest_start(buffer);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003367 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003368 trace_ctx);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003369 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003370 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003371 entry = ring_buffer_event_data(event);
3372 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003373 entry->fmt = fmt;
3374
Steven Rostedt07d777f2011-09-22 14:01:55 -04003375 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05003376 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003377 __buffer_unlock_commit(buffer, event);
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003378 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003379 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003380
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003381out:
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003382 ring_buffer_nest_end(buffer);
Steven Rostedt (VMware)34423f22020-01-22 06:44:50 -05003383out_put:
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003384 put_trace_buf();
3385
3386out_nobuffer:
Steven Rostedt5168ae52010-06-03 09:36:50 -04003387 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003388 unpause_graph_tracing();
3389
3390 return len;
3391}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003392EXPORT_SYMBOL_GPL(trace_vbprintk);
3393
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003394__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003395static int
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003396__trace_array_vprintk(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003397 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003398{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003399 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003400 struct ring_buffer_event *event;
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003401 int len = 0, size;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003402 struct print_entry *entry;
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003403 unsigned int trace_ctx;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003404 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003405
3406 if (tracing_disabled || tracing_selftest_running)
3407 return 0;
3408
Steven Rostedt07d777f2011-09-22 14:01:55 -04003409 /* Don't pollute graph traces with trace_vprintk internals */
3410 pause_graph_tracing();
3411
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003412 trace_ctx = tracing_gen_ctx();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003413 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003414
Steven Rostedt07d777f2011-09-22 14:01:55 -04003415
3416 tbuffer = get_trace_buf();
3417 if (!tbuffer) {
3418 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003419 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003420 }
3421
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003422 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003423
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003424 size = sizeof(*entry) + len + 1;
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003425 ring_buffer_nest_start(buffer);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003426 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003427 trace_ctx);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003428 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003429 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003430 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01003431 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003432
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003433 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05003434 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003435 __buffer_unlock_commit(buffer, event);
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003436 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003437 }
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003438
3439out:
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003440 ring_buffer_nest_end(buffer);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003441 put_trace_buf();
3442
3443out_nobuffer:
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003444 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003445 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003446
3447 return len;
3448}
Steven Rostedt659372d2009-09-03 19:11:07 -04003449
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003450__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003451int trace_array_vprintk(struct trace_array *tr,
3452 unsigned long ip, const char *fmt, va_list args)
3453{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003454 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003455}
3456
Steven Rostedt (VMware)38ce2a92020-08-06 12:46:49 -04003457/**
3458 * trace_array_printk - Print a message to a specific instance
3459 * @tr: The instance trace_array descriptor
3460 * @ip: The instruction pointer that this is called from.
3461 * @fmt: The format to print (printf format)
3462 *
 3463 * If a subsystem sets up its own instance, it may printk strings
 3464 * into its tracing instance buffer using this function. Note, this
 3465 * function will not write into the top level buffer (use
 3466 * trace_printk() for that), as the top level buffer should only
 3467 * contain events that can be individually disabled.
 3468 * trace_printk() is only used for debugging a kernel, and should
Ingo Molnarf2cc0202021-03-23 18:49:35 +01003469 * never be incorporated in normal use.
Steven Rostedt (VMware)38ce2a92020-08-06 12:46:49 -04003470 *
3471 * trace_array_printk() can be used, as it will not add noise to the
3472 * top level tracing buffer.
3473 *
3474 * Note, trace_array_init_printk() must be called on @tr before this
3475 * can be used.
3476 */
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003477__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003478int trace_array_printk(struct trace_array *tr,
3479 unsigned long ip, const char *fmt, ...)
3480{
3481 int ret;
3482 va_list ap;
3483
Divya Indi953ae452019-08-14 10:55:25 -07003484 if (!tr)
3485 return -ENOENT;
3486
Steven Rostedt (VMware)c791cc42020-06-16 14:53:55 -04003487 /* This is only allowed for created instances */
3488 if (tr == &global_trace)
3489 return 0;
3490
3491 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3492 return 0;
3493
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003494 va_start(ap, fmt);
3495 ret = trace_array_vprintk(tr, ip, fmt, ap);
3496 va_end(ap);
3497 return ret;
3498}
Divya Indif45d1222019-03-20 11:28:51 -07003499EXPORT_SYMBOL_GPL(trace_array_printk);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003500
Steven Rostedt (VMware)38ce2a92020-08-06 12:46:49 -04003501/**
3502 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3503 * @tr: The trace array to initialize the buffers for
3504 *
 3505 * As trace_array_printk() only writes into instances, calls to it are
 3506 * OK to have in the kernel (unlike trace_printk()). This needs to be called
3507 * before trace_array_printk() can be used on a trace_array.
3508 */
3509int trace_array_init_printk(struct trace_array *tr)
3510{
3511 if (!tr)
3512 return -ENOENT;
3513
3514 /* This is only allowed for created instances */
3515 if (tr == &global_trace)
3516 return -EINVAL;
3517
3518 return alloc_percpu_trace_buffer();
3519}
3520EXPORT_SYMBOL_GPL(trace_array_init_printk);
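/*
 * Illustrative sketch (assumed module usage; the instance name is made up
 * and trace_array_get_by_name() is assumed to be available to the caller):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (tr && !trace_array_init_printk(tr))
 *		trace_array_printk(tr, _THIS_IP_, "hello from %s\n", "my_module");
 */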
3521
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003522__printf(3, 4)
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003523int trace_array_printk_buf(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003524 unsigned long ip, const char *fmt, ...)
3525{
3526 int ret;
3527 va_list ap;
3528
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003529 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003530 return 0;
3531
3532 va_start(ap, fmt);
3533 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3534 va_end(ap);
3535 return ret;
3536}
3537
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003538__printf(2, 0)
Steven Rostedt659372d2009-09-03 19:11:07 -04003539int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3540{
Steven Rostedta813a152009-10-09 01:41:35 -04003541 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04003542}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003543EXPORT_SYMBOL_GPL(trace_vprintk);
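/*
 * Note (assumed relationship, consistent with the comments above):
 * trace_vbprintk()/trace_vprintk() are the backends of the trace_printk()
 * macro, so a debugging statement such as
 *
 *	trace_printk("x=%d\n", x);
 *
 * ends up here and is written into the top level trace buffer.
 */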
3544
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003545static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04003546{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003547 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3548
Steven Rostedt5a90f572008-09-03 17:42:51 -04003549 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003550 if (buf_iter)
Steven Rostedt (VMware)bc1a72a2020-03-17 17:32:25 -04003551 ring_buffer_iter_advance(buf_iter);
Steven Rostedt5a90f572008-09-03 17:42:51 -04003552}
3553
Ingo Molnare309b412008-05-12 21:20:51 +02003554static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003555peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3556 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003557{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003558 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003559 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003560
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04003561 if (buf_iter) {
Steven Rostedtd7690412008-10-01 00:29:53 -04003562 event = ring_buffer_iter_peek(buf_iter, ts);
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04003563 if (lost_events)
3564 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3565 (unsigned long)-1 : 0;
3566 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003567 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003568 lost_events);
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04003569 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003570
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003571 if (event) {
3572 iter->ent_size = ring_buffer_event_length(event);
3573 return ring_buffer_event_data(event);
3574 }
3575 iter->ent_size = 0;
3576 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003577}
Steven Rostedtd7690412008-10-01 00:29:53 -04003578
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003579static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003580__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3581 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003582{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003583 struct trace_buffer *buffer = iter->array_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003584 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08003585 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003586 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003587 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003588 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003589 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003590 int cpu;
3591
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003592 /*
3593 * If we are in a per_cpu trace file, don't bother by iterating over
3594 * all cpu and peek directly.
3595 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003596 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003597 if (ring_buffer_empty_cpu(buffer, cpu_file))
3598 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003599 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003600 if (ent_cpu)
3601 *ent_cpu = cpu_file;
3602
3603 return ent;
3604 }
3605
Steven Rostedtab464282008-05-12 21:21:00 +02003606 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003607
3608 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003609 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003610
Steven Rostedtbc21b472010-03-31 19:49:26 -04003611 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003612
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02003613 /*
3614 * Pick the entry with the smallest timestamp:
3615 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003616 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003617 next = ent;
3618 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003619 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003620 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003621 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003622 }
3623 }
3624
Steven Rostedt12b5da32012-03-27 10:43:28 -04003625 iter->ent_size = next_size;
3626
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003627 if (ent_cpu)
3628 *ent_cpu = next_cpu;
3629
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003630 if (ent_ts)
3631 *ent_ts = next_ts;
3632
Steven Rostedtbc21b472010-03-31 19:49:26 -04003633 if (missing_events)
3634 *missing_events = next_lost;
3635
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003636 return next;
3637}
3638
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09003639#define STATIC_FMT_BUF_SIZE 128
3640static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3641
3642static char *trace_iter_expand_format(struct trace_iterator *iter)
3643{
3644 char *tmp;
3645
Steven Rostedt (VMware)0e1e71d2021-04-19 14:23:12 -04003646 /*
3647 * iter->tr is NULL when used with tp_printk, which makes
3648 * this get called where it is not safe to call krealloc().
3649 */
3650 if (!iter->tr || iter->fmt == static_fmt_buf)
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09003651 return NULL;
3652
3653 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3654 GFP_KERNEL);
3655 if (tmp) {
3656 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3657 iter->fmt = tmp;
3658 }
3659
3660 return tmp;
3661}
3662
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05003663/* Returns true if the string is safe to dereference from an event */
3664static bool trace_safe_str(struct trace_iterator *iter, const char *str)
3665{
3666 unsigned long addr = (unsigned long)str;
3667 struct trace_event *trace_event;
3668 struct trace_event_call *event;
3669
3670 /* OK if part of the event data */
3671 if ((addr >= (unsigned long)iter->ent) &&
3672 (addr < (unsigned long)iter->ent + iter->ent_size))
3673 return true;
3674
3675 /* OK if part of the temp seq buffer */
3676 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3677 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3678 return true;
3679
3680 /* Core rodata can not be freed */
3681 if (is_kernel_rodata(addr))
3682 return true;
3683
3684 if (trace_is_tracepoint_string(str))
3685 return true;
3686
3687 /*
3688 * Now this could be a module event, referencing core module
3689 * data, which is OK.
3690 */
3691 if (!iter->ent)
3692 return false;
3693
3694 trace_event = ftrace_find_event(iter->ent->type);
3695 if (!trace_event)
3696 return false;
3697
3698 event = container_of(trace_event, struct trace_event_call, event);
Steven Rostedt (VMware)1d185382021-08-16 23:42:57 -04003699 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05003700 return false;
3701
3702 /* Would rather have rodata, but this will suffice */
Steven Rostedt (VMware)1d185382021-08-16 23:42:57 -04003703 if (within_module_core(addr, event->module))
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05003704 return true;
3705
3706 return false;
3707}
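
/*
 * Illustrative user-space sketch (not part of trace.c): the core of
 * trace_safe_str() above is a set of address-range checks -- a pointer
 * is only trusted if it lies inside a region known to still be valid
 * (the event record, the temp seq buffer, rodata, module core). The
 * region list below is a hypothetical stand-in for those areas.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct region {
	const void *start;
	size_t len;
};

static bool addr_in_region(const void *p, const struct region *r)
{
	uintptr_t addr = (uintptr_t)p;
	uintptr_t start = (uintptr_t)r->start;

	return addr >= start && addr < start + r->len;
}

static bool str_is_safe(const char *str, const struct region *regions,
			int nr_regions)
{
	for (int i = 0; i < nr_regions; i++) {
		if (addr_in_region(str, &regions[i]))
			return true;
	}
	return false;	/* unknown origin: treat as unsafe */
}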
3708
3709static const char *show_buffer(struct trace_seq *s)
3710{
3711 struct seq_buf *seq = &s->seq;
3712
3713 seq_buf_terminate(seq);
3714
3715 return seq->buffer;
3716}
3717
3718static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3719
3720static int test_can_verify_check(const char *fmt, ...)
3721{
3722 char buf[16];
3723 va_list ap;
3724 int ret;
3725
3726 /*
3727 * The verifier depends on vsnprintf() modifying the va_list
3728 * passed to it, where it is passed by reference. Some architectures
3729 * (like x86_32) pass it by value, which means that vsnprintf()
3730 * does not modify the caller's va_list, and the verifier
3731 * would then need to be able to understand all the values that
3732 * vsnprintf() can use. If it is passed by value, then the
3733 * verifier is disabled.
3734 */
3735 va_start(ap, fmt);
3736 vsnprintf(buf, 16, "%d", ap);
3737 ret = va_arg(ap, int);
3738 va_end(ap);
3739
3740 return ret;
3741}
3742
3743static void test_can_verify(void)
3744{
3745 if (!test_can_verify_check("%d %d", 0, 1)) {
3746 pr_info("trace event string verifier disabled\n");
3747 static_branch_inc(&trace_no_verify);
3748 }
3749}
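
/*
 * Illustrative user-space demo (not part of trace.c) of the probe used
 * by test_can_verify_check() above: if va_list behaves as a reference
 * on this ABI, vsnprintf() advances the caller's ap, so the following
 * va_arg() sees the second argument; if va_list is passed by value, it
 * still sees the first one. Strictly, the C standard leaves ap
 * indeterminate after the vsnprintf() call -- the probe deliberately
 * relies on ABI behavior, which is exactly what it is testing for.
 */
#include <stdarg.h>
#include <stdio.h>

static int probe(const char *fmt, ...)
{
	char buf[16];
	va_list ap;
	int ret;

	va_start(ap, fmt);
	vsnprintf(buf, sizeof(buf), "%d", ap);	/* may consume one int */
	ret = va_arg(ap, int);			/* second int, or first again */
	va_end(ap);

	return ret;
}

int main(void)
{
	if (probe("%d %d", 0, 1))
		printf("va_list behaves as a reference: verifier usable\n");
	else
		printf("va_list passed by value: verifier disabled\n");
	return 0;
}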
3750
3751/**
3752 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3753 * @iter: The iterator that holds the seq buffer and the event being printed
3754 * @fmt: The format used to print the event
3755 * @ap: The va_list holding the data to print from @fmt.
3756 *
3757 * This writes the data into the @iter->seq buffer using the data from
3758 * @fmt and @ap. If the format has a %s, then the source of the string
3759 * is examined to make sure it is safe to print, otherwise it will
3760 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string
3761 * pointer.
3762 */
3763void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3764 va_list ap)
3765{
3766 const char *p = fmt;
3767 const char *str;
3768 int i, j;
3769
3770 if (WARN_ON_ONCE(!fmt))
3771 return;
3772
3773 if (static_branch_unlikely(&trace_no_verify))
3774 goto print;
3775
3776 /* Don't bother checking when doing a ftrace_dump() */
3777 if (iter->fmt == static_fmt_buf)
3778 goto print;
3779
3780 while (*p) {
Steven Rostedt (VMware)eb01f532021-05-13 12:23:24 -04003781 bool star = false;
3782 int len = 0;
3783
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05003784 j = 0;
3785
3786 /* We only care about %s and variants */
3787 for (i = 0; p[i]; i++) {
3788 if (i + 1 >= iter->fmt_size) {
3789 /*
3790 * If we can't expand the copy buffer,
3791 * just print it.
3792 */
3793 if (!trace_iter_expand_format(iter))
3794 goto print;
3795 }
3796
3797 if (p[i] == '\\' && p[i+1]) {
3798 i++;
3799 continue;
3800 }
3801 if (p[i] == '%') {
3802 /* Need to test cases like %08.*s */
3803 for (j = 1; p[i+j]; j++) {
3804 if (isdigit(p[i+j]) ||
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05003805 p[i+j] == '.')
3806 continue;
Steven Rostedt (VMware)eb01f532021-05-13 12:23:24 -04003807 if (p[i+j] == '*') {
3808 star = true;
3809 continue;
3810 }
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05003811 break;
3812 }
3813 if (p[i+j] == 's')
3814 break;
Steven Rostedt (VMware)eb01f532021-05-13 12:23:24 -04003815 star = false;
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05003816 }
3817 j = 0;
3818 }
3819 /* If no %s found then just print normally */
3820 if (!p[i])
3821 break;
3822
3823 /* Copy up to the %s, and print that */
3824 strncpy(iter->fmt, p, i);
3825 iter->fmt[i] = '\0';
3826 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3827
Nikita Yushchenko2ef75e92021-11-18 17:55:16 +03003828 /*
3829 * If iter->seq is full, the above call no longer guarantees
3830 * that ap is in sync with fmt processing, and further calls
3831 * to va_arg() can return wrong positional arguments.
3832 *
3833 * Ensure that ap is no longer used in this case.
3834 */
3835 if (iter->seq.full) {
3836 p = "";
3837 break;
3838 }
3839
Steven Rostedt (VMware)eb01f532021-05-13 12:23:24 -04003840 if (star)
3841 len = va_arg(ap, int);
3842
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05003843 /* The ap now points to the string data of the %s */
3844 str = va_arg(ap, const char *);
3845
3846 /*
3847 * If you hit this warning, it is likely that the
3848 * trace event in question used %s on a string that
3849 * was saved at the time of the event, but may not be
3850 * around when the trace is read. Use __string(),
3851 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3852 * instead. See samples/trace_events/trace-events-sample.h
3853 * for reference.
3854 */
3855 if (WARN_ONCE(!trace_safe_str(iter, str),
3856 "fmt: '%s' current_buffer: '%s'",
3857 fmt, show_buffer(&iter->seq))) {
3858 int ret;
3859
3860 /* Try to safely read the string */
Steven Rostedt (VMware)eb01f532021-05-13 12:23:24 -04003861 if (star) {
3862 if (len + 1 > iter->fmt_size)
3863 len = iter->fmt_size - 1;
3864 if (len < 0)
3865 len = 0;
3866 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3867 iter->fmt[len] = 0;
3868 star = false;
3869 } else {
3870 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3871 iter->fmt_size);
3872 }
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05003873 if (ret < 0)
3874 trace_seq_printf(&iter->seq, "(0x%px)", str);
3875 else
3876 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3877 str, iter->fmt);
3878 str = "[UNSAFE-MEMORY]";
3879 strcpy(iter->fmt, "%s");
3880 } else {
3881 strncpy(iter->fmt, p + i, j + 1);
3882 iter->fmt[j+1] = '\0';
3883 }
Steven Rostedt (VMware)eb01f532021-05-13 12:23:24 -04003884 if (star)
3885 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3886 else
3887 trace_seq_printf(&iter->seq, iter->fmt, str);
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05003888
3889 p += i + j + 1;
3890 }
3891 print:
3892 if (*p)
3893 trace_seq_vprintf(&iter->seq, p, ap);
3894}
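
/*
 * Illustrative user-space sketch (not part of trace.c): the scanner in
 * trace_check_vprintf() above walks the format looking for %s
 * conversions, skipping escaped characters and allowing width and
 * precision such as "%08.*s". A simplified version of just that scan;
 * next_str_conversion() is a made-up helper name.
 */
#include <ctype.h>
#include <stdbool.h>

/*
 * Return the offset of the next "%s" conversion in @fmt (the '%'),
 * or -1 if there is none. *star is set when a '*' width/precision
 * was seen, meaning an extra int argument precedes the string.
 */
static int next_str_conversion(const char *fmt, bool *star)
{
	int i, j;

	*star = false;

	for (i = 0; fmt[i]; i++) {
		if (fmt[i] == '\\' && fmt[i + 1]) {	/* skip escaped chars */
			i++;
			continue;
		}
		if (fmt[i] != '%')
			continue;

		/* Accept width/precision such as "%08.*s" */
		for (j = 1; fmt[i + j]; j++) {
			if (isdigit((unsigned char)fmt[i + j]) ||
			    fmt[i + j] == '.')
				continue;
			if (fmt[i + j] == '*') {
				*star = true;
				continue;
			}
			break;
		}
		if (fmt[i + j] == 's')
			return i;
		*star = false;	/* some other conversion; keep scanning */
	}
	return -1;
}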
3895
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09003896const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3897{
3898 const char *p, *new_fmt;
3899 char *q;
3900
3901 if (WARN_ON_ONCE(!fmt))
3902 return fmt;
3903
Steven Rostedt (VMware)0e1e71d2021-04-19 14:23:12 -04003904 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
Masami Hiramatsua345a672020-10-15 23:55:25 +09003905 return fmt;
3906
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09003907 p = fmt;
3908 new_fmt = q = iter->fmt;
3909 while (*p) {
3910 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3911 if (!trace_iter_expand_format(iter))
3912 return fmt;
3913
3914 q += iter->fmt - new_fmt;
3915 new_fmt = iter->fmt;
3916 }
3917
3918 *q++ = *p++;
3919
3920 /* Replace %p with %px */
3921 if (p[-1] == '%') {
3922 if (p[0] == '%') {
3923 *q++ = *p++;
3924 } else if (p[0] == 'p' && !isalnum(p[1])) {
3925 *q++ = *p++;
3926 *q++ = 'x';
3927 }
3928 }
3929 }
3930 *q = '\0';
3931
3932 return new_fmt;
3933}
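
/*
 * Illustrative user-space sketch (not part of trace.c) of the rewrite
 * done by trace_event_format() above: copy the format, leave "%%"
 * alone, and turn a bare "%p" (with no alphanumeric extension after
 * it) into "%px". Buffer growth is omitted; @out is assumed to be
 * large enough (at most one extra byte per "%p").
 * E.g. "ptr=%p flags=%%p" becomes "ptr=%px flags=%%p".
 */
#include <ctype.h>

static void unhash_format(const char *p, char *out)
{
	char *q = out;

	while (*p) {
		*q++ = *p++;

		/* Replace %p with %px */
		if (p[-1] == '%') {
			if (p[0] == '%') {		/* literal "%%" */
				*q++ = *p++;
			} else if (p[0] == 'p' &&
				   !isalnum((unsigned char)p[1])) {
				*q++ = *p++;
				*q++ = 'x';
			}
		}
	}
	*q = '\0';
}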
3934
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003935#define STATIC_TEMP_BUF_SIZE 128
Minchan Kim8fa655a2020-11-25 14:56:54 -08003936static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003937
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003938/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003939struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3940 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003941{
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003942 /* __find_next_entry will reset ent_size */
3943 int ent_size = iter->ent_size;
3944 struct trace_entry *entry;
3945
3946 /*
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003947 * If called from ftrace_dump(), then the iter->temp buffer
3948 * will be the static_temp_buf and not created from kmalloc.
3949 * If the entry size is greater than the buffer, we cannot
3950 * save it. Just return NULL in that case. This is only
3951 * used to add markers when two consecutive events' time
3952 * stamps have a large delta. See trace_print_lat_context().
3953 */
3954 if (iter->temp == static_temp_buf &&
3955 STATIC_TEMP_BUF_SIZE < ent_size)
3956 return NULL;
3957
3958 /*
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003959 * The __find_next_entry() may call peek_next_entry(), which may
3960 * call ring_buffer_peek() that may make the contents of iter->ent
3961 * undefined. Need to copy iter->ent now.
3962 */
3963 if (iter->ent && iter->ent != iter->temp) {
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003964 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3965 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
Steven Rostedt (VMware)851e6f62020-09-29 12:27:23 -04003966 void *temp;
3967 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3968 if (!temp)
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003969 return NULL;
Steven Rostedt (VMware)851e6f62020-09-29 12:27:23 -04003970 kfree(iter->temp);
3971 iter->temp = temp;
3972 iter->temp_size = iter->ent_size;
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003973 }
3974 memcpy(iter->temp, iter->ent, iter->ent_size);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003975 iter->ent = iter->temp;
3976 }
3977 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3978 /* Put back the original ent_size */
3979 iter->ent_size = ent_size;
3980
3981 return entry;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003982}
Ingo Molnar8c523a92008-05-12 21:20:46 +02003983
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003984/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05003985void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003986{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003987 iter->ent = __find_next_entry(iter, &iter->cpu,
3988 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003989
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003990 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003991 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003992
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003993 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003994}
3995
Ingo Molnare309b412008-05-12 21:20:51 +02003996static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003997{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003998 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003999 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004000}
4001
Ingo Molnare309b412008-05-12 21:20:51 +02004002static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004003{
4004 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004005 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02004006 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004007
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004008 WARN_ON_ONCE(iter->leftover);
4009
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004010 (*pos)++;
4011
4012 /* can't go backwards */
4013 if (iter->idx > i)
4014 return NULL;
4015
4016 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05004017 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004018 else
4019 ent = iter;
4020
4021 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05004022 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004023
4024 iter->pos = *pos;
4025
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004026 return ent;
4027}
4028
Jason Wessel955b61e2010-08-05 09:22:23 -05004029void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004030{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004031 struct ring_buffer_iter *buf_iter;
4032 unsigned long entries = 0;
4033 u64 ts;
4034
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004035 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004036
Steven Rostedt6d158a82012-06-27 20:46:14 -04004037 buf_iter = trace_buffer_iter(iter, cpu);
4038 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004039 return;
4040
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004041 ring_buffer_iter_reset(buf_iter);
4042
4043 /*
4044 * With the max latency tracers, a reset may never have taken
4045 * place on a CPU. This is evident from the timestamp being
4046 * before the start of the buffer.
4047 */
YangHui69243722020-06-16 11:36:46 +08004048 while (ring_buffer_iter_peek(buf_iter, &ts)) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004049 if (ts >= iter->array_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004050 break;
4051 entries++;
Steven Rostedt (VMware)bc1a72a2020-03-17 17:32:25 -04004052 ring_buffer_iter_advance(buf_iter);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004053 }
4054
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004055 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004056}
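
/*
 * Illustrative user-space sketch (not part of trace.c) of what
 * tracing_iter_reset() does per CPU: advance past every entry stamped
 * before the buffer's start time and remember how many were skipped,
 * so the entry counts can be corrected later. The flat record array
 * is a hypothetical stand-in for the ring buffer iterator.
 */
#include <stddef.h>
#include <stdint.h>

struct rec {
	uint64_t ts;
};

static size_t skip_stale(const struct rec *recs, size_t nr, uint64_t start,
			 size_t *pos)
{
	size_t skipped = 0;

	*pos = 0;
	while (*pos < nr && recs[*pos].ts < start) {
		(*pos)++;
		skipped++;
	}
	return skipped;
}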
4057
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004058/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004059 * The current tracer is copied to avoid taking a global lock
4060 * all around.
4061 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004062static void *s_start(struct seq_file *m, loff_t *pos)
4063{
4064 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004065 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004066 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004067 void *p = NULL;
4068 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004069 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004070
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09004071 /*
4072 * Copy the tracer to avoid using a global lock all around.
4073 * iter->trace is a copy of current_trace; the name pointer
4074 * can be compared instead of using strcmp(), as iter->trace->name
4075 * will point to the same string as current_trace->name.
4076 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004077 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004078 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
4079 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004080 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004081
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004082#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004083 if (iter->snapshot && iter->trace->use_max_tr)
4084 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004085#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004086
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004087 if (*pos != iter->pos) {
4088 iter->ent = NULL;
4089 iter->cpu = 0;
4090 iter->idx = -1;
4091
Steven Rostedtae3b5092013-01-23 15:22:59 -05004092 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004093 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004094 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004095 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004096 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004097
Lai Jiangshanac91d852010-03-02 17:54:50 +08004098 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004099 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4100 ;
4101
4102 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004103 /*
4104 * If we overflowed the seq_file before, then we want
4105 * to just reuse the trace_seq buffer again.
4106 */
4107 if (iter->leftover)
4108 p = iter;
4109 else {
4110 l = *pos - 1;
4111 p = s_next(m, p, &l);
4112 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004113 }
4114
Lai Jiangshan4f535962009-05-18 19:35:34 +08004115 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004116 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004117 return p;
4118}
4119
4120static void s_stop(struct seq_file *m, void *p)
4121{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004122 struct trace_iterator *iter = m->private;
4123
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004124#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004125 if (iter->snapshot && iter->trace->use_max_tr)
4126 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004127#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004128
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004129 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004130 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004131}
4132
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004133static void
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004134get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004135 unsigned long *entries, int cpu)
4136{
4137 unsigned long count;
4138
4139 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4140 /*
4141 * If this buffer has skipped entries, then we hold all
4142 * entries for the trace and we need to ignore the
4143 * ones before the time stamp.
4144 */
4145 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4146 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4147 /* total is the same as the entries */
4148 *total = count;
4149 } else
4150 *total = count +
4151 ring_buffer_overrun_cpu(buf->buffer, cpu);
4152 *entries = count;
4153}
4154
4155static void
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004156get_total_entries(struct array_buffer *buf,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004157 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004158{
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004159 unsigned long t, e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004160 int cpu;
4161
4162 *total = 0;
4163 *entries = 0;
4164
4165 for_each_tracing_cpu(cpu) {
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004166 get_total_entries_cpu(buf, &t, &e, cpu);
4167 *total += t;
4168 *entries += e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004169 }
4170}
4171
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004172unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4173{
4174 unsigned long total, entries;
4175
4176 if (!tr)
4177 tr = &global_trace;
4178
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004179 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004180
4181 return entries;
4182}
4183
4184unsigned long trace_total_entries(struct trace_array *tr)
4185{
4186 unsigned long total, entries;
4187
4188 if (!tr)
4189 tr = &global_trace;
4190
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004191 get_total_entries(&tr->array_buffer, &total, &entries);
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004192
4193 return entries;
4194}
4195
Ingo Molnare309b412008-05-12 21:20:51 +02004196static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004197{
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02004198 seq_puts(m, "# _------=> CPU# \n"
Sebastian Andrzej Siewior289e7b02021-12-13 11:08:53 +01004199 "# / _-----=> irqs-off/BH-disabled\n"
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02004200 "# | / _----=> need-resched \n"
4201 "# || / _---=> hardirq/softirq \n"
4202 "# ||| / _--=> preempt-depth \n"
Thomas Gleixner54357f02021-08-10 15:26:25 +02004203 "# |||| / _-=> migrate-disable \n"
4204 "# ||||| / delay \n"
4205 "# cmd pid |||||| time | caller \n"
4206 "# \\ / |||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004207}
4208
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004209static void print_event_info(struct array_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004210{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004211 unsigned long total;
4212 unsigned long entries;
4213
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004214 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004215 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4216 entries, total, num_online_cpus());
4217 seq_puts(m, "#\n");
4218}
4219
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004220static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
Joel Fernandes441dae82017-06-25 22:38:43 -07004221 unsigned int flags)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004222{
Joel Fernandes441dae82017-06-25 22:38:43 -07004223 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4224
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004225 print_event_info(buf, m);
Joel Fernandes441dae82017-06-25 22:38:43 -07004226
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02004227 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4228 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004229}
4230
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004231static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
Joel Fernandes441dae82017-06-25 22:38:43 -07004232 unsigned int flags)
Steven Rostedt77271ce2011-11-17 09:34:33 -05004233{
Joel Fernandes441dae82017-06-25 22:38:43 -07004234 bool tgid = flags & TRACE_ITER_RECORD_TGID;
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02004235 const char *space = " ";
4236 int prec = tgid ? 12 : 2;
Joel Fernandes441dae82017-06-25 22:38:43 -07004237
Quentin Perret9e738212019-02-14 15:29:50 +00004238 print_event_info(buf, m);
4239
Sebastian Andrzej Siewior289e7b02021-12-13 11:08:53 +01004240 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02004241 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4242 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4243 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
Thomas Gleixner54357f02021-08-10 15:26:25 +02004244 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4245 seq_printf(m, "# %.*s|||| / delay\n", prec, space);
4246 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4247 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
Steven Rostedt77271ce2011-11-17 09:34:33 -05004248}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004249
Jiri Olsa62b915f2010-04-02 19:01:22 +02004250void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004251print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4252{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004253 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004254 struct array_buffer *buf = iter->array_buffer;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004255 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004256 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004257 unsigned long entries;
4258 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004259 const char *name = "preemption";
4260
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05004261 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004262
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004263 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004264
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004265 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004266 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004267 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004268 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004269 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004270 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02004271 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004272 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02004273 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004274 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004275#if defined(CONFIG_PREEMPT_NONE)
4276 "server",
4277#elif defined(CONFIG_PREEMPT_VOLUNTARY)
4278 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04004279#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004280 "preempt",
Sebastian Andrzej Siewior9c34fc42019-10-15 21:18:20 +02004281#elif defined(CONFIG_PREEMPT_RT)
4282 "preempt_rt",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004283#else
4284 "unknown",
4285#endif
4286 /* These are reserved for later use */
4287 0, 0, 0, 0);
4288#ifdef CONFIG_SMP
4289 seq_printf(m, " #P:%d)\n", num_online_cpus());
4290#else
4291 seq_puts(m, ")\n");
4292#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004293 seq_puts(m, "# -----------------\n");
4294 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004295 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07004296 data->comm, data->pid,
4297 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004298 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004299 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004300
4301 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004302 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02004303 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4304 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004305 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02004306 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4307 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04004308 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004309 }
4310
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004311 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004312}
4313
Steven Rostedta3097202008-11-07 22:36:02 -05004314static void test_cpu_buff_start(struct trace_iterator *iter)
4315{
4316 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004317 struct trace_array *tr = iter->tr;
Steven Rostedta3097202008-11-07 22:36:02 -05004318
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004319 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004320 return;
4321
4322 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4323 return;
4324
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07004325 if (cpumask_available(iter->started) &&
4326 cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05004327 return;
4328
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004329 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004330 return;
4331
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07004332 if (cpumask_available(iter->started))
Sasha Levin919cd972015-09-04 12:45:56 -04004333 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004334
4335 /* Don't print started cpu buffer for the first entry of the trace */
4336 if (iter->idx > 1)
4337 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4338 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05004339}
4340
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004341static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004342{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004343 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02004344 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004345 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02004346 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004347 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004348
Ingo Molnar4e3c3332008-05-12 21:20:45 +02004349 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004350
Steven Rostedta3097202008-11-07 22:36:02 -05004351 test_cpu_buff_start(iter);
4352
Steven Rostedtf633cef2008-12-23 23:24:13 -05004353 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004354
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004355 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004356 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4357 trace_print_lat_context(iter);
4358 else
4359 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004360 }
4361
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004362 if (trace_seq_has_overflowed(s))
4363 return TRACE_TYPE_PARTIAL_LINE;
4364
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02004365 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04004366 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02004367
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004368 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04004369
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004370 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004371}
4372
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004373static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004374{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004375 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004376 struct trace_seq *s = &iter->seq;
4377 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004378 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004379
4380 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004381
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004382 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004383 trace_seq_printf(s, "%d %d %llu ",
4384 entry->pid, iter->cpu, iter->ts);
4385
4386 if (trace_seq_has_overflowed(s))
4387 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004388
Steven Rostedtf633cef2008-12-23 23:24:13 -05004389 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02004390 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04004391 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02004392
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004393 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04004394
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004395 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004396}
4397
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004398static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004399{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004400 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004401 struct trace_seq *s = &iter->seq;
4402 unsigned char newline = '\n';
4403 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004404 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004405
4406 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004407
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004408 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004409 SEQ_PUT_HEX_FIELD(s, entry->pid);
4410 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4411 SEQ_PUT_HEX_FIELD(s, iter->ts);
4412 if (trace_seq_has_overflowed(s))
4413 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004414 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004415
Steven Rostedtf633cef2008-12-23 23:24:13 -05004416 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02004417 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04004418 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02004419 if (ret != TRACE_TYPE_HANDLED)
4420 return ret;
4421 }
Steven Rostedt7104f302008-10-01 10:52:51 -04004422
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004423 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004424
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004425 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004426}
4427
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004428static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004429{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004430 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004431 struct trace_seq *s = &iter->seq;
4432 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004433 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004434
4435 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004436
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004437 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004438 SEQ_PUT_FIELD(s, entry->pid);
4439 SEQ_PUT_FIELD(s, iter->cpu);
4440 SEQ_PUT_FIELD(s, iter->ts);
4441 if (trace_seq_has_overflowed(s))
4442 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004443 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004444
Steven Rostedtf633cef2008-12-23 23:24:13 -05004445 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04004446 return event ? event->funcs->binary(iter, 0, event) :
4447 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004448}
4449
Jiri Olsa62b915f2010-04-02 19:01:22 +02004450int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004451{
Steven Rostedt6d158a82012-06-27 20:46:14 -04004452 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004453 int cpu;
4454
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004455 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05004456 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004457 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04004458 buf_iter = trace_buffer_iter(iter, cpu);
4459 if (buf_iter) {
4460 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004461 return 0;
4462 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004463 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004464 return 0;
4465 }
4466 return 1;
4467 }
4468
Steven Rostedtab464282008-05-12 21:21:00 +02004469 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04004470 buf_iter = trace_buffer_iter(iter, cpu);
4471 if (buf_iter) {
4472 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04004473 return 0;
4474 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004475 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04004476 return 0;
4477 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004478 }
Steven Rostedtd7690412008-10-01 00:29:53 -04004479
Frederic Weisbecker797d3712008-09-30 18:13:45 +02004480 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004481}
4482
Lai Jiangshan4f535962009-05-18 19:35:34 +08004483/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05004484enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004485{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004486 struct trace_array *tr = iter->tr;
4487 unsigned long trace_flags = tr->trace_flags;
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004488 enum print_line_t ret;
4489
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004490 if (iter->lost_events) {
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04004491 if (iter->lost_events == (unsigned long)-1)
4492 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4493 iter->cpu);
4494 else
4495 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4496 iter->cpu, iter->lost_events);
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004497 if (trace_seq_has_overflowed(&iter->seq))
4498 return TRACE_TYPE_PARTIAL_LINE;
4499 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04004500
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004501 if (iter->trace && iter->trace->print_line) {
4502 ret = iter->trace->print_line(iter);
4503 if (ret != TRACE_TYPE_UNHANDLED)
4504 return ret;
4505 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02004506
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05004507 if (iter->ent->type == TRACE_BPUTS &&
4508 trace_flags & TRACE_ITER_PRINTK &&
4509 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4510 return trace_print_bputs_msg_only(iter);
4511
Frederic Weisbecker48ead022009-03-12 18:24:49 +01004512 if (iter->ent->type == TRACE_BPRINT &&
4513 trace_flags & TRACE_ITER_PRINTK &&
4514 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04004515 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01004516
Frederic Weisbecker66896a82008-12-13 20:18:13 +01004517 if (iter->ent->type == TRACE_PRINT &&
4518 trace_flags & TRACE_ITER_PRINTK &&
4519 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04004520 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01004521
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004522 if (trace_flags & TRACE_ITER_BIN)
4523 return print_bin_fmt(iter);
4524
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004525 if (trace_flags & TRACE_ITER_HEX)
4526 return print_hex_fmt(iter);
4527
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004528 if (trace_flags & TRACE_ITER_RAW)
4529 return print_raw_fmt(iter);
4530
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004531 return print_trace_fmt(iter);
4532}
4533
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004534void trace_latency_header(struct seq_file *m)
4535{
4536 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004537 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004538
4539 /* print nothing if the buffers are empty */
4540 if (trace_empty(iter))
4541 return;
4542
4543 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4544 print_trace_header(m, iter);
4545
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004546 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004547 print_lat_help_header(m);
4548}
4549
Jiri Olsa62b915f2010-04-02 19:01:22 +02004550void trace_default_header(struct seq_file *m)
4551{
4552 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004553 struct trace_array *tr = iter->tr;
4554 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02004555
Jiri Olsaf56e7f82011-06-03 16:58:49 +02004556 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4557 return;
4558
Jiri Olsa62b915f2010-04-02 19:01:22 +02004559 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4560 /* print nothing if the buffers are empty */
4561 if (trace_empty(iter))
4562 return;
4563 print_trace_header(m, iter);
4564 if (!(trace_flags & TRACE_ITER_VERBOSE))
4565 print_lat_help_header(m);
4566 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05004567 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4568 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004569 print_func_help_header_irq(iter->array_buffer,
Joel Fernandes441dae82017-06-25 22:38:43 -07004570 m, trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05004571 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004572 print_func_help_header(iter->array_buffer, m,
Joel Fernandes441dae82017-06-25 22:38:43 -07004573 trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05004574 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02004575 }
4576}
4577
Steven Rostedte0a413f2011-09-29 21:26:16 -04004578static void test_ftrace_alive(struct seq_file *m)
4579{
4580 if (!ftrace_is_dead())
4581 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004582 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4583 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04004584}
4585
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004586#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004587static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004588{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004589 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4590 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4591 "# Takes a snapshot of the main buffer.\n"
4592 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4593 "# (Doesn't have to be '2' works with any number that\n"
4594 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004595}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004596
4597static void show_snapshot_percpu_help(struct seq_file *m)
4598{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004599 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004600#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004601 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4602 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004603#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004604 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4605 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004606#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004607 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4608 "# (Doesn't have to be '2' works with any number that\n"
4609 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004610}
4611
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004612static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4613{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004614 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004615 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004616 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004617 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004618
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004619 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004620 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4621 show_snapshot_main_help(m);
4622 else
4623 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004624}
4625#else
4626/* Should never be called */
4627static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4628#endif
4629
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004630static int s_show(struct seq_file *m, void *v)
4631{
4632 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004633 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004634
4635 if (iter->ent == NULL) {
4636 if (iter->tr) {
4637 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4638 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04004639 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004640 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004641 if (iter->snapshot && trace_empty(iter))
4642 print_snapshot_help(m, iter);
4643 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004644 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02004645 else
4646 trace_default_header(m);
4647
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004648 } else if (iter->leftover) {
4649 /*
4650 * If we filled the seq_file buffer earlier, we
4651 * want to just show it now.
4652 */
4653 ret = trace_print_seq(m, &iter->seq);
4654
4655 /* ret should this time be zero, but you never know */
4656 iter->leftover = ret;
4657
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004658 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004659 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004660 ret = trace_print_seq(m, &iter->seq);
4661 /*
4662 * If we overflow the seq_file buffer, then it will
4663 * ask us for this data again at start up.
4664 * Use that instead.
4665 * ret is 0 if seq_file write succeeded.
4666 * -1 otherwise.
4667 */
4668 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004669 }
4670
4671 return 0;
4672}
4673
Oleg Nesterov649e9c702013-07-23 17:25:54 +02004674/*
4675 * Should be used after trace_array_get(), trace_types_lock
4676 * ensures that i_cdev was already initialized.
4677 */
4678static inline int tracing_get_cpu(struct inode *inode)
4679{
4680 if (inode->i_cdev) /* See trace_create_cpu_file() */
4681 return (long)inode->i_cdev - 1;
4682 return RING_BUFFER_ALL_CPUS;
4683}
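
/*
 * Illustrative sketch (not part of trace.c): tracing_get_cpu() above
 * works because the per-CPU files store "cpu + 1" in inode->i_cdev
 * (see trace_create_cpu_file()), so a NULL/zero value can still mean
 * "no specific CPU" (RING_BUFFER_ALL_CPUS). The same encode/decode
 * pair in plain C, with a hypothetical ALL_CPUS value:
 */
#include <stddef.h>

#define ALL_CPUS (-1)

static void *encode_cpu(int cpu)
{
	return (void *)(long)(cpu + 1);	/* 0 stays reserved for "all" */
}

static int decode_cpu(void *cookie)
{
	if (cookie)
		return (int)(long)cookie - 1;
	return ALL_CPUS;
}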
4684
James Morris88e9d342009-09-22 16:43:43 -07004685static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004686 .start = s_start,
4687 .next = s_next,
4688 .stop = s_stop,
4689 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004690};
4691
Ingo Molnare309b412008-05-12 21:20:51 +02004692static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02004693__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004694{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004695 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004696 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02004697 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004698
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004699 if (tracing_disabled)
4700 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02004701
Jiri Olsa50e18b92012-04-25 10:23:39 +02004702 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004703 if (!iter)
4704 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004705
Gil Fruchter72917232015-06-09 10:32:35 +03004706 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
Steven Rostedt6d158a82012-06-27 20:46:14 -04004707 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004708 if (!iter->buffer_iter)
4709 goto release;
4710
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004711 /*
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004712 * trace_find_next_entry() may need to save off iter->ent.
4713 * It will place it into the iter->temp buffer. As most
4714 * events are less than 128, allocate a buffer of that size.
4715 * If one is greater, then trace_find_next_entry() will
4716 * allocate a new buffer to adjust for the bigger iter->ent.
4717 * It's not critical if it fails to get allocated here.
4718 */
4719 iter->temp = kmalloc(128, GFP_KERNEL);
4720 if (iter->temp)
4721 iter->temp_size = 128;
4722
4723 /*
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09004724 * trace_event_printf() may need to modify the given format
4725 * string to replace %p with %px so that it shows the real address
4726 * instead of a hash value. However, that is only needed for event
4727 * tracing; other tracers may not need it. Defer the allocation
4728 * until it is needed.
4729 */
4730 iter->fmt = NULL;
4731 iter->fmt_size = 0;
4732
4733 /*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004734 * We make a copy of the current tracer to avoid concurrent
4735 * changes on it while we are reading.
4736 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004737 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004738 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004739 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004740 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004741
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004742 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004743
Li Zefan79f55992009-06-15 14:58:26 +08004744 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004745 goto fail;
4746
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004747 iter->tr = tr;
4748
4749#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004750 /* Currently only the top directory has a snapshot */
4751 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004752 iter->array_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004753 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004754#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004755 iter->array_buffer = &tr->array_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004756 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004757 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02004758 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004759 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004760
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004761 /* Notify the tracer early; before we stop tracing. */
Dan Carpenterb3f7a6c2014-11-22 21:30:12 +03004762 if (iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01004763 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004764
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004765 /* Annotate start of buffers if we had overruns */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004766 if (ring_buffer_overruns(iter->array_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004767 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4768
David Sharp8be07092012-11-13 12:18:22 -08004769 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004770 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004771 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4772
Steven Rostedt (VMware)06e0a542020-03-17 17:32:31 -04004773 /*
4774 * If pause-on-trace is enabled, then stop the trace while
4775 * dumping, unless this is the "snapshot" file
4776 */
4777 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004778 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004779
Steven Rostedtae3b5092013-01-23 15:22:59 -05004780 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004781 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004782 iter->buffer_iter[cpu] =
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004783 ring_buffer_read_prepare(iter->array_buffer->buffer,
Douglas Anderson31b265b2019-03-08 11:32:04 -08004784 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004785 }
4786 ring_buffer_read_prepare_sync();
4787 for_each_tracing_cpu(cpu) {
4788 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004789 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004790 }
4791 } else {
4792 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004793 iter->buffer_iter[cpu] =
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004794 ring_buffer_read_prepare(iter->array_buffer->buffer,
Douglas Anderson31b265b2019-03-08 11:32:04 -08004795 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004796 ring_buffer_read_prepare_sync();
4797 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004798 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004799 }
4800
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004801 mutex_unlock(&trace_types_lock);
4802
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004803 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004804
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004805 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004806 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004807 kfree(iter->trace);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004808 kfree(iter->temp);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004809 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004810release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02004811 seq_release_private(inode, file);
4812 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004813}
4814
4815int tracing_open_generic(struct inode *inode, struct file *filp)
4816{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004817 int ret;
4818
4819 ret = tracing_check_open_get_tr(NULL);
4820 if (ret)
4821 return ret;
Steven Rostedt60a11772008-05-12 21:20:44 +02004822
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004823 filp->private_data = inode->i_private;
4824 return 0;
4825}
4826
Geyslan G. Bem2e864212013-10-18 21:15:54 -03004827bool tracing_is_disabled(void)
4828{
4829 return (tracing_disabled) ? true : false;
4830}
4831
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004832/*
4833 * Open and update trace_array ref count.
4834 * Must have the current trace_array passed to it.
4835 */
Steven Rostedt (VMware)aa07d712019-10-11 19:12:21 -04004836int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004837{
4838 struct trace_array *tr = inode->i_private;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004839 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004840
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004841 ret = tracing_check_open_get_tr(tr);
4842 if (ret)
4843 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004844
4845 filp->private_data = inode->i_private;
4846
4847 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004848}
4849
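/*
 * trace_marker files are opened as streams: stream_open() marks the
 * file non-seekable so writes bypass f_pos handling, then the normal
 * per-instance open takes the trace_array reference.
 */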
John Keeping2972e302021-12-07 14:25:58 +00004850static int tracing_mark_open(struct inode *inode, struct file *filp)
4851{
4852 stream_open(inode, filp);
4853 return tracing_open_generic_tr(inode, filp);
4854}
4855
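/*
 * Release for the "trace" file: tear down the per-cpu ring buffer
 * iterators, give the tracer a chance to run its close() callback,
 * restart tracing if the open had paused it, and free the iterator.
 */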
Hannes Eder4fd27352009-02-10 19:44:12 +01004856static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004857{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004858 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07004859 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004860 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004861 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004862
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004863 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004864 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004865 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004866 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004867
Oleg Nesterov6484c712013-07-23 17:26:10 +02004868 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004869 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004870 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05004871
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004872 for_each_tracing_cpu(cpu) {
4873 if (iter->buffer_iter[cpu])
4874 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4875 }
4876
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004877 if (iter->trace && iter->trace->close)
4878 iter->trace->close(iter);
4879
Steven Rostedt (VMware)06e0a542020-03-17 17:32:31 -04004880 if (!iter->snapshot && tr->stop_count)
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004881 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004882 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004883
4884 __trace_array_put(tr);
4885
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004886 mutex_unlock(&trace_types_lock);
4887
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004888 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004889 free_cpumask_var(iter->started);
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09004890 kfree(iter->fmt);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004891 kfree(iter->temp);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004892 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004893 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02004894 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004895
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004896 return 0;
4897}
4898
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004899static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4900{
4901 struct trace_array *tr = inode->i_private;
4902
4903 trace_array_put(tr);
4904 return 0;
4905}
4906
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004907static int tracing_single_release_tr(struct inode *inode, struct file *file)
4908{
4909 struct trace_array *tr = inode->i_private;
4910
4911 trace_array_put(tr);
4912
4913 return single_release(inode, file);
4914}
4915
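/*
 * Open for the "trace" file. An open for write with O_TRUNC clears
 * the buffer (one CPU or all of them); an open for read builds the
 * full trace_iterator via __tracing_open().
 */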
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004916static int tracing_open(struct inode *inode, struct file *file)
4917{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004918 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004919 struct trace_iterator *iter;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004920 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004921
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004922 ret = tracing_check_open_get_tr(tr);
4923 if (ret)
4924 return ret;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004925
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004926 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02004927 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4928 int cpu = tracing_get_cpu(inode);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004929 struct array_buffer *trace_buf = &tr->array_buffer;
Bo Yan8dd33bc2017-09-18 10:03:35 -07004930
4931#ifdef CONFIG_TRACER_MAX_TRACE
4932 if (tr->current_trace->print_max)
4933 trace_buf = &tr->max_buffer;
4934#endif
Oleg Nesterov6484c712013-07-23 17:26:10 +02004935
4936 if (cpu == RING_BUFFER_ALL_CPUS)
Bo Yan8dd33bc2017-09-18 10:03:35 -07004937 tracing_reset_online_cpus(trace_buf);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004938 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04004939 tracing_reset_cpu(trace_buf, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004940 }
4941
4942 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004943 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004944 if (IS_ERR(iter))
4945 ret = PTR_ERR(iter);
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004946 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004947 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4948 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004949
4950 if (ret < 0)
4951 trace_array_put(tr);
4952
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004953 return ret;
4954}
4955
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004956/*
4957 * Some tracers are not suitable for instance buffers.
4958 * A tracer is always available for the global array (toplevel)
4959 * or if it explicitly states that it is.
4960 */
4961static bool
4962trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4963{
4964 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4965}
4966
4967/* Find the next tracer that this trace array may use */
4968static struct tracer *
4969get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4970{
4971 while (t && !trace_ok_for_array(t, tr))
4972 t = t->next;
4973
4974 return t;
4975}
4976
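/*
 * seq_file iterators for the "available_tracers" file: walk the
 * global trace_types list under trace_types_lock, skipping tracers
 * that this trace_array is not allowed to use.
 */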
Ingo Molnare309b412008-05-12 21:20:51 +02004977static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004978t_next(struct seq_file *m, void *v, loff_t *pos)
4979{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004980 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004981 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004982
4983 (*pos)++;
4984
4985 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004986 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004987
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004988 return t;
4989}
4990
4991static void *t_start(struct seq_file *m, loff_t *pos)
4992{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004993 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004994 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004995 loff_t l = 0;
4996
4997 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004998
4999 t = get_tracer_for_array(tr, trace_types);
5000 for (; t && l < *pos; t = t_next(m, t, &l))
5001 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005002
5003 return t;
5004}
5005
5006static void t_stop(struct seq_file *m, void *p)
5007{
5008 mutex_unlock(&trace_types_lock);
5009}
5010
5011static int t_show(struct seq_file *m, void *v)
5012{
5013 struct tracer *t = v;
5014
5015 if (!t)
5016 return 0;
5017
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01005018 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005019 if (t->next)
5020 seq_putc(m, ' ');
5021 else
5022 seq_putc(m, '\n');
5023
5024 return 0;
5025}
5026
James Morris88e9d342009-09-22 16:43:43 -07005027static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005028 .start = t_start,
5029 .next = t_next,
5030 .stop = t_stop,
5031 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005032};
5033
5034static int show_traces_open(struct inode *inode, struct file *file)
5035{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005036 struct trace_array *tr = inode->i_private;
5037 struct seq_file *m;
5038 int ret;
5039
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005040 ret = tracing_check_open_get_tr(tr);
5041 if (ret)
5042 return ret;
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04005043
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005044 ret = seq_open(file, &show_traces_seq_ops);
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04005045 if (ret) {
5046 trace_array_put(tr);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005047 return ret;
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04005048 }
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005049
5050 m = file->private_data;
5051 m->private = tr;
5052
5053 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005054}
5055
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04005056static int show_traces_release(struct inode *inode, struct file *file)
5057{
5058 struct trace_array *tr = inode->i_private;
5059
5060 trace_array_put(tr);
5061 return seq_release(inode, file);
5062}
5063
Steven Rostedt4acd4d02009-03-18 10:40:24 -04005064static ssize_t
5065tracing_write_stub(struct file *filp, const char __user *ubuf,
5066 size_t count, loff_t *ppos)
5067{
5068 return count;
5069}
5070
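/*
 * Shared llseek: files opened for read are seq_file based and can
 * seek normally; write-only opens keep the position pinned at zero.
 */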
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005071loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08005072{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005073 int ret;
5074
Slava Pestov364829b2010-11-24 15:13:16 -08005075 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005076 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08005077 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005078 file->f_pos = ret = 0;
5079
5080 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08005081}
5082
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005083static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005084 .open = tracing_open,
5085 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04005086 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005087 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005088 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005089};
5090
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005091static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02005092 .open = show_traces_open,
5093 .read = seq_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005094 .llseek = seq_lseek,
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04005095 .release = show_traces_release,
Ingo Molnarc7078de2008-05-12 21:20:52 +02005096};
5097
5098static ssize_t
5099tracing_cpumask_read(struct file *filp, char __user *ubuf,
5100 size_t count, loff_t *ppos)
5101{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07005102 struct trace_array *tr = file_inode(filp)->i_private;
Changbin Du90e406f2017-11-30 11:39:43 +08005103 char *mask_str;
Ingo Molnar36dfe922008-05-12 21:20:52 +02005104 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02005105
Changbin Du90e406f2017-11-30 11:39:43 +08005106 len = snprintf(NULL, 0, "%*pb\n",
5107 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5108 mask_str = kmalloc(len, GFP_KERNEL);
5109 if (!mask_str)
5110 return -ENOMEM;
Ingo Molnar36dfe922008-05-12 21:20:52 +02005111
Changbin Du90e406f2017-11-30 11:39:43 +08005112 len = snprintf(mask_str, len, "%*pb\n",
Tejun Heo1a402432015-02-13 14:37:39 -08005113 cpumask_pr_args(tr->tracing_cpumask));
5114 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02005115 count = -EINVAL;
5116 goto out_err;
5117 }
Changbin Du90e406f2017-11-30 11:39:43 +08005118 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
Ingo Molnar36dfe922008-05-12 21:20:52 +02005119
5120out_err:
Changbin Du90e406f2017-11-30 11:39:43 +08005121 kfree(mask_str);
Ingo Molnarc7078de2008-05-12 21:20:52 +02005122
5123 return count;
5124}
5125
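/*
 * Install a new tracing cpumask: CPUs leaving the mask have their
 * per-cpu recording disabled (and the disabled counter bumped), CPUs
 * entering it are re-enabled, all under max_lock with interrupts off
 * so it cannot race with a max-latency buffer swap.
 */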
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09005126int tracing_set_cpumask(struct trace_array *tr,
5127 cpumask_var_t tracing_cpumask_new)
Ingo Molnarc7078de2008-05-12 21:20:52 +02005128{
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09005129 int cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10305130
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09005131 if (!tr)
5132 return -EINVAL;
Ingo Molnar36dfe922008-05-12 21:20:52 +02005133
Steven Rostedta5e25882008-12-02 15:34:05 -05005134 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05005135 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02005136 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02005137 /*
5138 * Increase/decrease the disabled counter if we are
5139 * about to flip a bit in the cpumask:
5140 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07005141 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10305142 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005143 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5144 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02005145 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07005146 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10305147 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005148 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5149 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02005150 }
5151 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05005152 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05005153 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02005154
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07005155 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09005156
5157 return 0;
5158}
5159
5160static ssize_t
5161tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5162 size_t count, loff_t *ppos)
5163{
5164 struct trace_array *tr = file_inode(filp)->i_private;
5165 cpumask_var_t tracing_cpumask_new;
5166 int err;
5167
Tetsuo Handac5e3a412021-04-01 14:58:23 +09005168 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09005169 return -ENOMEM;
5170
5171 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5172 if (err)
5173 goto err_free;
5174
5175 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5176 if (err)
5177 goto err_free;
5178
Rusty Russell9e01c1b2009-01-01 10:12:22 +10305179 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02005180
Ingo Molnarc7078de2008-05-12 21:20:52 +02005181 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02005182
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09005183err_free:
Li Zefan215368e2009-06-15 10:56:42 +08005184 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02005185
5186 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02005187}
5188
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005189static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07005190 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02005191 .read = tracing_cpumask_read,
5192 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07005193 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005194 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005195};
5196
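/*
 * Show handler for the "trace_options" file: list every core flag and
 * then the current tracer's private flags, prefixing disabled ones
 * with "no".
 */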
Li Zefanfdb372e2009-12-08 11:15:59 +08005197static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005198{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05005199 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005200 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05005201 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05005202 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005203
Steven Rostedtd8e83d22009-02-26 23:55:58 -05005204 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005205 tracer_flags = tr->current_trace->flags->val;
5206 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05005207
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005208 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005209 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08005210 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005211 else
Li Zefanfdb372e2009-12-08 11:15:59 +08005212 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005213 }
5214
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005215 for (i = 0; trace_opts[i].name; i++) {
5216 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08005217 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005218 else
Li Zefanfdb372e2009-12-08 11:15:59 +08005219 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005220 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05005221 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005222
Li Zefanfdb372e2009-12-08 11:15:59 +08005223 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005224}
5225
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005226static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08005227 struct tracer_flags *tracer_flags,
5228 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005229{
Chunyu Hud39cdd22016-03-08 21:37:01 +08005230 struct tracer *trace = tracer_flags->trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08005231 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005232
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005233 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005234 if (ret)
5235 return ret;
5236
5237 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08005238 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005239 else
Zhaolei77708412009-08-07 18:53:21 +08005240 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005241 return 0;
5242}
5243
Li Zefan8d18eaa2009-12-08 11:17:06 +08005244/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005245static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08005246{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005247 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08005248 struct tracer_flags *tracer_flags = trace->flags;
5249 struct tracer_opt *opts = NULL;
5250 int i;
5251
5252 for (i = 0; tracer_flags->opts[i].name; i++) {
5253 opts = &tracer_flags->opts[i];
5254
5255 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005256 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08005257 }
5258
5259 return -EINVAL;
5260}
5261
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005262/* Some tracers require overwrite to stay enabled */
5263int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5264{
5265 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5266 return -1;
5267
5268 return 0;
5269}
5270
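/*
 * Central place to flip a TRACE_ITER_* flag on an instance. The
 * current tracer may veto the change, and some flags need extra work
 * beyond setting the bit: allocating the tgid map, toggling ring
 * buffer overwrite, following forks, or switching trace_printk.
 */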
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005271int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04005272{
Paul Burton4030a6e2021-07-01 10:24:07 -07005273 int *map;
5274
Prateek Sood3a53acf2019-12-10 09:15:16 +00005275 if ((mask == TRACE_ITER_RECORD_TGID) ||
5276 (mask == TRACE_ITER_RECORD_CMD))
5277 lockdep_assert_held(&event_mutex);
5278
Steven Rostedtaf4617b2009-03-17 18:09:55 -04005279 /* do nothing if flag is already set */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005280 if (!!(tr->trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005281 return 0;
5282
5283 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005284 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05005285 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005286 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04005287
5288 if (enabled)
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005289 tr->trace_flags |= mask;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04005290 else
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005291 tr->trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08005292
5293 if (mask == TRACE_ITER_RECORD_CMD)
5294 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08005295
Joel Fernandesd914ba32017-06-26 19:01:55 -07005296 if (mask == TRACE_ITER_RECORD_TGID) {
Paul Burton4030a6e2021-07-01 10:24:07 -07005297 if (!tgid_map) {
5298 tgid_map_max = pid_max;
5299 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5300 GFP_KERNEL);
5301
5302 /*
5303 * Pairs with smp_load_acquire() in
5304 * trace_find_tgid_ptr() to ensure that if it observes
5305 * the tgid_map we just allocated then it also observes
5306 * the corresponding tgid_map_max value.
5307 */
5308 smp_store_release(&tgid_map, map);
5309 }
Joel Fernandesd914ba32017-06-26 19:01:55 -07005310 if (!tgid_map) {
5311 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5312 return -ENOMEM;
5313 }
5314
5315 trace_event_enable_tgid_record(enabled);
5316 }
5317
Steven Rostedtc37775d2016-04-13 16:59:18 -04005318 if (mask == TRACE_ITER_EVENT_FORK)
5319 trace_event_follow_fork(tr, enabled);
5320
Namhyung Kim1e104862017-04-17 11:44:28 +09005321 if (mask == TRACE_ITER_FUNC_FORK)
5322 ftrace_pid_follow_fork(tr, enabled);
5323
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04005324 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005325 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04005326#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005327 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04005328#endif
5329 }
Steven Rostedt81698832012-10-11 10:15:05 -04005330
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04005331 if (mask == TRACE_ITER_PRINTK) {
Steven Rostedt81698832012-10-11 10:15:05 -04005332 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04005333 trace_printk_control(enabled);
5334 }
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005335
5336 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04005337}
5338
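/*
 * Apply a single option token: "opt" sets it, "noopt" clears it. If
 * the token is not one of the core trace_options it is handed to the
 * current tracer's private flags via set_tracer_option().
 */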
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09005339int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005340{
Li Zefan8d18eaa2009-12-08 11:17:06 +08005341 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005342 int neg = 0;
Yisheng Xie591a0332018-05-17 16:36:03 +08005343 int ret;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08005344 size_t orig_len = strlen(option);
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05005345 int len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005346
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005347 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005348
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05005349 len = str_has_prefix(cmp, "no");
5350 if (len)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005351 neg = 1;
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05005352
5353 cmp += len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005354
Prateek Sood3a53acf2019-12-10 09:15:16 +00005355 mutex_lock(&event_mutex);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005356 mutex_lock(&trace_types_lock);
5357
Yisheng Xie591a0332018-05-17 16:36:03 +08005358 ret = match_string(trace_options, -1, cmp);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005359 /* If no option could be set, test the specific tracer options */
Yisheng Xie591a0332018-05-17 16:36:03 +08005360 if (ret < 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005361 ret = set_tracer_option(tr, cmp, neg);
Yisheng Xie591a0332018-05-17 16:36:03 +08005362 else
5363 ret = set_tracer_flag(tr, 1 << ret, !neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005364
5365 mutex_unlock(&trace_types_lock);
Prateek Sood3a53acf2019-12-10 09:15:16 +00005366 mutex_unlock(&event_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005367
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08005368 /*
5369 * If the first trailing whitespace is replaced with '\0' by strstrip,
5370 * turn it back into a space.
5371 */
5372 if (orig_len > strlen(option))
5373 option[strlen(option)] = ' ';
5374
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005375 return ret;
5376}
5377
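/*
 * Walk the comma-separated list saved in trace_boot_options_buf (the
 * trace_options= boot parameter) and apply each entry to the global
 * trace array.
 */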
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08005378static void __init apply_trace_boot_options(void)
5379{
5380 char *buf = trace_boot_options_buf;
5381 char *option;
5382
5383 while (true) {
5384 option = strsep(&buf, ",");
5385
5386 if (!option)
5387 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08005388
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05005389 if (*option)
5390 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08005391
5392 /* Put back the comma to allow this to be called again */
5393 if (buf)
5394 *(buf - 1) = ',';
5395 }
5396}
5397
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005398static ssize_t
5399tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5400 size_t cnt, loff_t *ppos)
5401{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005402 struct seq_file *m = filp->private_data;
5403 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005404 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005405 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005406
5407 if (cnt >= sizeof(buf))
5408 return -EINVAL;
5409
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08005410 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005411 return -EFAULT;
5412
Steven Rostedta8dd2172013-01-09 20:54:17 -05005413 buf[cnt] = 0;
5414
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005415 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005416 if (ret < 0)
5417 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005418
Jiri Olsacf8517c2009-10-23 19:36:16 -04005419 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005420
5421 return cnt;
5422}
5423
Li Zefanfdb372e2009-12-08 11:15:59 +08005424static int tracing_trace_options_open(struct inode *inode, struct file *file)
5425{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005426 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005427 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005428
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005429 ret = tracing_check_open_get_tr(tr);
5430 if (ret)
5431 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005432
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005433 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5434 if (ret < 0)
5435 trace_array_put(tr);
5436
5437 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08005438}
5439
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005440static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08005441 .open = tracing_trace_options_open,
5442 .read = seq_read,
5443 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005444 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05005445 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005446};
5447
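/*
 * Contents of the tracefs "README" file, returned verbatim by
 * tracing_readme_read() below: a condensed usage reference for the
 * tracing files, triggers and histogram syntax.
 */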
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005448static const char readme_msg[] =
5449 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005450 "# echo 0 > tracing_on : quick way to disable tracing\n"
5451 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5452 " Important files:\n"
5453 " trace\t\t\t- The static contents of the buffer\n"
5454 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5455 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5456 " current_tracer\t- function and latency tracers\n"
5457 " available_tracers\t- list of configured tracers for current_tracer\n"
Tom Zanussia8d65572019-03-31 18:48:25 -05005458 " error_log\t- error log for failed commands (that support it)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005459 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5460 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
 5461	" trace_clock\t\t- change the clock used to order events\n"
5462 " local: Per cpu clock but may not be synced across CPUs\n"
5463 " global: Synced across CPUs but slows tracing down.\n"
5464 " counter: Not a clock, but just an increment\n"
5465 " uptime: Jiffy counter from time of boot\n"
5466 " perf: Same clock that perf events use\n"
5467#ifdef CONFIG_X86_64
5468 " x86-tsc: TSC cycle counter\n"
5469#endif
Tom Zanussi2c1ea602018-01-15 20:51:41 -06005470	"\n timestamp_mode\t- view the mode used to timestamp events\n"
5471 " delta: Delta difference against a buffer-wide timestamp\n"
5472 " absolute: Absolute (standalone) timestamp\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005473	"\n trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
Steven Rostedtfa32e852016-07-06 15:25:08 -04005474	"\n trace_marker_raw\t\t- Writes into this file are written as binary data into the kernel buffer\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005475 " tracing_cpumask\t- Limit which CPUs to trace\n"
5476 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5477 "\t\t\t Remove sub-buffer with rmdir\n"
5478 " trace_options\t\t- Set format or modify how tracing happens\n"
Srivatsa S. Bhat (VMware)b9416992019-01-28 17:55:53 -08005479 "\t\t\t Disable an option by prefixing 'no' to the\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005480 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005481 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005482#ifdef CONFIG_DYNAMIC_FTRACE
5483 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005484 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5485 "\t\t\t functions\n"
Masami Hiramatsu60f1d5e2016-10-05 20:58:15 +09005486 "\t accepts: func_full_name or glob-matching-pattern\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005487 "\t modules: Can select a group via module\n"
5488 "\t Format: :mod:<module-name>\n"
5489 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5490 "\t triggers: a command to perform when function is hit\n"
5491 "\t Format: <function>:<trigger>[:count]\n"
5492 "\t trigger: traceon, traceoff\n"
5493 "\t\t enable_event:<system>:<event>\n"
5494 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005495#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005496 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005497#endif
5498#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005499 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005500#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04005501 "\t\t dump\n"
5502 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005503 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5504 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5505 "\t The first one will disable tracing every time do_fault is hit\n"
5506 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
 5507	"\t The first time do_trap is hit and it disables tracing, the\n"
5508 "\t counter will decrement to 2. If tracing is already disabled,\n"
5509 "\t the counter will not decrement. It only decrements when the\n"
5510 "\t trigger did work\n"
5511 "\t To remove trigger without count:\n"
5512 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5513 "\t To remove trigger with a count:\n"
5514 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005515 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005516 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5517 "\t modules: Can select a group via module command :mod:\n"
5518 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005519#endif /* CONFIG_DYNAMIC_FTRACE */
5520#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005521 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5522 "\t\t (function)\n"
Steven Rostedt (VMware)b3b1e6e2020-03-19 23:19:06 -04005523 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5524 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005525#endif
5526#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5527 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09005528 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005529 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5530#endif
5531#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005532 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5533 "\t\t\t snapshot buffer. Read the contents for more\n"
5534 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005535#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08005536#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005537 " stack_trace\t\t- Shows the max stack trace when active\n"
5538 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005539 "\t\t\t Write into this file to reset the max size (trigger a\n"
5540 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005541#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005542 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5543 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005544#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08005545#endif /* CONFIG_STACK_TRACER */
Masami Hiramatsu5448d442018-11-05 18:02:08 +09005546#ifdef CONFIG_DYNAMIC_EVENTS
Masami Hiramatsuca89bc02019-06-20 00:07:49 +09005547 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
Masami Hiramatsu5448d442018-11-05 18:02:08 +09005548 "\t\t\t Write into this file to define/undefine new trace events.\n"
5549#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005550#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsuca89bc02019-06-20 00:07:49 +09005551 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005552 "\t\t\t Write into this file to define/undefine new trace events.\n"
5553#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005554#ifdef CONFIG_UPROBE_EVENTS
Masami Hiramatsu41af3cf2019-06-20 00:07:58 +09005555 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005556 "\t\t\t Write into this file to define/undefine new trace events.\n"
5557#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005558#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
Masami Hiramatsu86425622016-08-18 17:58:15 +09005559 "\t accepts: event-definitions (one definition per line)\n"
Masami Hiramatsuc3ca46e2017-05-23 15:05:50 +09005560 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5561 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
Masami Hiramatsu7bbab382018-11-05 18:03:33 +09005562#ifdef CONFIG_HIST_TRIGGERS
5563 "\t s:[synthetic/]<event> <field> [<field>]\n"
5564#endif
Tzvetomir Stoyanov (VMware)7491e2c2021-08-19 11:26:06 -04005565 "\t e[:[<group>/]<event>] <attached-group>.<attached-event> [<args>]\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005566 "\t -:[<group>/]<event>\n"
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005567#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsu86425622016-08-18 17:58:15 +09005568 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
Masami Hiramatsu4725cd82020-09-10 17:55:35 +09005569	" place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005570#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005571#ifdef CONFIG_UPROBE_EVENTS
Masami Hiramatsu3dd3aae2020-09-10 17:55:46 +09005572 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005573#endif
5574 "\t args: <name>=fetcharg[:type]\n"
Tzvetomir Stoyanov (VMware)7491e2c2021-08-19 11:26:06 -04005575 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09005576#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
Masami Hiramatsue65f7ae2019-05-15 14:38:42 +09005577 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09005578#else
Masami Hiramatsue65f7ae2019-05-15 14:38:42 +09005579 "\t $stack<index>, $stack, $retval, $comm,\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09005580#endif
Masami Hiramatsua42e3c42019-06-20 00:08:37 +09005581 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
Masami Hiramatsu60c2e0c2018-04-25 21:20:28 +09005582 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
Masami Hiramatsu88903c42019-05-15 14:38:30 +09005583 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
Masami Hiramatsu40b53b72018-04-25 21:21:55 +09005584 "\t <type>\\[<array-size>\\]\n"
Masami Hiramatsu7bbab382018-11-05 18:03:33 +09005585#ifdef CONFIG_HIST_TRIGGERS
5586 "\t field: <stype> <name>;\n"
5587 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5588 "\t [unsigned] char/int/long\n"
5589#endif
Tzvetomir Stoyanov (VMware)7491e2c2021-08-19 11:26:06 -04005590	"\t efield: For event probes ('e' types), the field is one of the fields\n"
5591 "\t of the <attached-group>/<attached-event>.\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005592#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06005593 " events/\t\t- Directory containing all trace event subsystems:\n"
5594 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5595 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005596 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5597 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005598 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005599 " events/<system>/<event>/\t- Directory containing control files for\n"
5600 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005601 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5602 " filter\t\t- If set, only events passing filter are traced\n"
5603 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005604 "\t Format: <trigger>[:count][if <filter>]\n"
5605 "\t trigger: traceon, traceoff\n"
5606 "\t enable_event:<system>:<event>\n"
5607 "\t disable_event:<system>:<event>\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06005608#ifdef CONFIG_HIST_TRIGGERS
5609 "\t enable_hist:<system>:<event>\n"
5610 "\t disable_hist:<system>:<event>\n"
5611#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06005612#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005613 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005614#endif
5615#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005616 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005617#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005618#ifdef CONFIG_HIST_TRIGGERS
5619 "\t\t hist (see below)\n"
5620#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005621 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5622 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5623 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5624 "\t events/block/block_unplug/trigger\n"
5625 "\t The first disables tracing every time block_unplug is hit.\n"
5626 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5627 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5628 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5629 "\t Like function triggers, the counter is only decremented if it\n"
5630 "\t enabled or disabled tracing.\n"
5631 "\t To remove a trigger without a count:\n"
5632 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5633 "\t To remove a trigger with a count:\n"
5634 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5635 "\t Filters can be ignored when removing a trigger.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005636#ifdef CONFIG_HIST_TRIGGERS
5637 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
Tom Zanussi76a3b0c2016-03-03 12:54:44 -06005638 "\t Format: hist:keys=<field1[,field2,...]>\n"
Kalesh Singh6a6e5ef2021-10-29 11:33:29 -07005639 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
Tom Zanussif2606832016-03-03 12:54:43 -06005640 "\t [:values=<field1[,field2,...]>]\n"
Tom Zanussie62347d2016-03-03 12:54:45 -06005641 "\t [:sort=<field1[,field2,...]>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005642 "\t [:size=#entries]\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06005643 "\t [:pause][:continue][:clear]\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005644 "\t [:name=histname1]\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005645 "\t [:<handler>.<action>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005646 "\t [if <filter>]\n\n"
Steven Rostedt (VMware)1e3bac72021-07-21 11:00:53 -04005647 "\t Note, special fields can be used as well:\n"
5648 "\t common_timestamp - to record current timestamp\n"
5649 "\t common_cpu - to record the CPU the event happened on\n"
5650 "\n"
Kalesh Singh6a6e5ef2021-10-29 11:33:29 -07005651 "\t A hist trigger variable can be:\n"
5652 "\t - a reference to a field e.g. x=current_timestamp,\n"
5653 "\t - a reference to another variable e.g. y=$x,\n"
5654 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5655 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5656 "\n"
Colin Ian Kingf2b20c62021-11-08 20:15:13 +00005657 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
Kalesh Singh6a6e5ef2021-10-29 11:33:29 -07005658 "\t multiplication(*) and division(/) operators. An operand can be either a\n"
5659 "\t variable reference, field or numeric literal.\n"
5660 "\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005661 "\t When a matching event is hit, an entry is added to a hash\n"
Tom Zanussif2606832016-03-03 12:54:43 -06005662 "\t table using the key(s) and value(s) named, and the value of a\n"
5663 "\t sum called 'hitcount' is incremented. Keys and values\n"
5664 "\t correspond to fields in the event's format description. Keys\n"
Tom Zanussi69a02002016-03-03 12:54:52 -06005665 "\t can be any field, or the special string 'stacktrace'.\n"
5666 "\t Compound keys consisting of up to two fields can be specified\n"
5667 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5668 "\t fields. Sort keys consisting of up to two fields can be\n"
5669 "\t specified using the 'sort' keyword. The sort direction can\n"
5670 "\t be modified by appending '.descending' or '.ascending' to a\n"
5671 "\t sort field. The 'size' parameter can be used to specify more\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005672 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5673 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5674 "\t its histogram data will be shared with other triggers of the\n"
5675 "\t same name, and trigger hits will update this common data.\n\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005676 "\t Reading the 'hist' file for the event will dump the hash\n"
Tom Zanussi52a7f162016-03-03 12:54:57 -06005677 "\t table in its entirety to stdout. If there are multiple hist\n"
5678 "\t triggers attached to an event, there will be a table for each\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005679 "\t trigger in the output. The table displayed for a named\n"
5680 "\t trigger will be the same as any other instance having the\n"
5681 "\t same name. The default format used to display a given field\n"
5682 "\t can be modified by appending any of the following modifiers\n"
5683 "\t to the field name, as applicable:\n\n"
Tom Zanussic6afad42016-03-03 12:54:49 -06005684 "\t .hex display a number as a hex value\n"
5685 "\t .sym display an address as a symbol\n"
Tom Zanussi6b4827a2016-03-03 12:54:50 -06005686 "\t .sym-offset display an address as a symbol and offset\n"
Tom Zanussi31696192016-03-03 12:54:51 -06005687 "\t .execname display a common_pid as a program name\n"
Tom Zanussi860f9f62018-01-15 20:51:48 -06005688 "\t .syscall display a syscall id as a syscall name\n"
5689 "\t .log2 display log2 value rather than raw number\n"
Steven Rostedt (VMware)37036432021-07-07 17:36:25 -04005690 "\t .buckets=size display values in groups of size rather than raw number\n"
Tom Zanussi860f9f62018-01-15 20:51:48 -06005691 "\t .usecs display a common_timestamp in microseconds\n\n"
Tom Zanussi83e99912016-03-03 12:54:46 -06005692 "\t The 'pause' parameter can be used to pause an existing hist\n"
5693 "\t trigger or to start a hist trigger but not log any events\n"
5694 "\t until told to do so. 'continue' can be used to start or\n"
5695 "\t restart a paused hist trigger.\n\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06005696 "\t The 'clear' parameter will clear the contents of a running\n"
5697 "\t hist trigger and leave its current paused/active state\n"
5698 "\t unchanged.\n\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06005699 "\t The enable_hist and disable_hist triggers can be used to\n"
5700 "\t have one event conditionally start and stop another event's\n"
Colin Ian King9e5a36a2019-02-17 22:32:22 +00005701 "\t already-attached hist trigger. The syntax is analogous to\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005702 "\t the enable_event and disable_event triggers.\n\n"
5703 "\t Hist trigger handlers and actions are executed whenever a\n"
 5704	"\t histogram entry is added or updated. They take the form:\n\n"
5705 "\t <handler>.<action>\n\n"
5706 "\t The available handlers are:\n\n"
5707 "\t onmatch(matching.event) - invoke on addition or update\n"
Tom Zanussidff81f52019-02-13 17:42:48 -06005708 "\t onmax(var) - invoke if var exceeds current max\n"
5709 "\t onchange(var) - invoke action if var changes\n\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005710 "\t The available actions are:\n\n"
Tom Zanussie91eefd72019-02-13 17:42:50 -06005711 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005712 "\t save(field,...) - save current event fields\n"
Tom Zanussia3785b72019-02-13 17:42:46 -06005713#ifdef CONFIG_TRACER_SNAPSHOT
Tom Zanussi1bc36bd2020-10-04 17:14:07 -05005714 "\t snapshot() - snapshot the trace buffer\n\n"
5715#endif
5716#ifdef CONFIG_SYNTH_EVENTS
5717 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5718 "\t Write into this file to define/undefine new synthetic events.\n"
5719 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
Tom Zanussia3785b72019-02-13 17:42:46 -06005720#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005721#endif
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005722;
5723
5724static ssize_t
5725tracing_readme_read(struct file *filp, char __user *ubuf,
5726 size_t cnt, loff_t *ppos)
5727{
5728 return simple_read_from_buffer(ubuf, cnt, ppos,
5729 readme_msg, strlen(readme_msg));
5730}
5731
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005732static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02005733 .open = tracing_open_generic,
5734 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005735 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005736};
5737
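/*
 * seq_file interface for "saved_tgids": walk the pid -> tgid map via
 * trace_find_tgid_ptr() and print one "<pid> <tgid>" pair per entry,
 * skipping slots that were never recorded.
 */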
Michael Sartain99c621d2017-07-05 22:07:15 -06005738static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5739{
Paul Burtonb81b3e92021-06-29 17:34:05 -07005740 int pid = ++(*pos);
Michael Sartain99c621d2017-07-05 22:07:15 -06005741
Paul Burton4030a6e2021-07-01 10:24:07 -07005742 return trace_find_tgid_ptr(pid);
Michael Sartain99c621d2017-07-05 22:07:15 -06005743}
5744
5745static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5746{
Paul Burton4030a6e2021-07-01 10:24:07 -07005747 int pid = *pos;
Michael Sartain99c621d2017-07-05 22:07:15 -06005748
Paul Burton4030a6e2021-07-01 10:24:07 -07005749 return trace_find_tgid_ptr(pid);
Michael Sartain99c621d2017-07-05 22:07:15 -06005750}
5751
5752static void saved_tgids_stop(struct seq_file *m, void *v)
5753{
5754}
5755
5756static int saved_tgids_show(struct seq_file *m, void *v)
5757{
Paul Burtonb81b3e92021-06-29 17:34:05 -07005758 int *entry = (int *)v;
5759 int pid = entry - tgid_map;
5760 int tgid = *entry;
Michael Sartain99c621d2017-07-05 22:07:15 -06005761
Paul Burtonb81b3e92021-06-29 17:34:05 -07005762 if (tgid == 0)
5763 return SEQ_SKIP;
5764
5765 seq_printf(m, "%d %d\n", pid, tgid);
Michael Sartain99c621d2017-07-05 22:07:15 -06005766 return 0;
5767}
5768
5769static const struct seq_operations tracing_saved_tgids_seq_ops = {
5770 .start = saved_tgids_start,
5771 .stop = saved_tgids_stop,
5772 .next = saved_tgids_next,
5773 .show = saved_tgids_show,
5774};
5775
5776static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5777{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005778 int ret;
5779
5780 ret = tracing_check_open_get_tr(NULL);
5781 if (ret)
5782 return ret;
Michael Sartain99c621d2017-07-05 22:07:15 -06005783
5784 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5785}
5786
5787
5788static const struct file_operations tracing_saved_tgids_fops = {
5789 .open = tracing_saved_tgids_open,
5790 .read = seq_read,
5791 .llseek = seq_lseek,
5792 .release = seq_release,
5793};
5794
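/*
 * seq_file interface for "saved_cmdlines": iterate the pid -> comm
 * map under trace_cmdline_lock and print "<pid> <comm>" for every
 * slot that is in use.
 */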
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005795static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04005796{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005797 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005798
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005799 if (*pos || m->count)
5800 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005801
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005802 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005803
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005804 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5805 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005806 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04005807 continue;
5808
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005809 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005810 }
5811
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005812 return NULL;
5813}
Avadh Patel69abe6a2009-04-10 16:04:48 -04005814
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005815static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5816{
5817 void *v;
5818 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005819
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005820 preempt_disable();
5821 arch_spin_lock(&trace_cmdline_lock);
5822
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005823 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005824 while (l <= *pos) {
5825 v = saved_cmdlines_next(m, v, &l);
5826 if (!v)
5827 return NULL;
5828 }
5829
5830 return v;
5831}
5832
5833static void saved_cmdlines_stop(struct seq_file *m, void *v)
5834{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005835 arch_spin_unlock(&trace_cmdline_lock);
5836 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005837}
5838
5839static int saved_cmdlines_show(struct seq_file *m, void *v)
5840{
5841 char buf[TASK_COMM_LEN];
5842 unsigned int *pid = v;
5843
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005844 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005845 seq_printf(m, "%d %s\n", *pid, buf);
5846 return 0;
5847}
5848
5849static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5850 .start = saved_cmdlines_start,
5851 .next = saved_cmdlines_next,
5852 .stop = saved_cmdlines_stop,
5853 .show = saved_cmdlines_show,
5854};
5855
5856static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5857{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005858 int ret;
5859
5860 ret = tracing_check_open_get_tr(NULL);
5861 if (ret)
5862 return ret;
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005863
5864 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04005865}
5866
5867static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005868 .open = tracing_saved_cmdlines_open,
5869 .read = seq_read,
5870 .llseek = seq_lseek,
5871 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04005872};
5873
5874static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005875tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5876 size_t cnt, loff_t *ppos)
5877{
5878 char buf[64];
5879 int r;
5880
5881 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09005882 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005883 arch_spin_unlock(&trace_cmdline_lock);
5884
5885 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5886}
5887
5888static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5889{
5890 kfree(s->saved_cmdlines);
5891 kfree(s->map_cmdline_to_pid);
5892 kfree(s);
5893}
5894
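/*
 * Resize the saved_cmdlines buffer: allocate a new buffer of the
 * requested size, swap it in under trace_cmdline_lock, and only then
 * free the old one.
 */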
5895static int tracing_resize_saved_cmdlines(unsigned int val)
5896{
5897 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5898
Namhyung Kima6af8fb2014-06-10 16:11:35 +09005899 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005900 if (!s)
5901 return -ENOMEM;
5902
5903 if (allocate_cmdlines_buffer(val, s) < 0) {
5904 kfree(s);
5905 return -ENOMEM;
5906 }
5907
5908 arch_spin_lock(&trace_cmdline_lock);
5909 savedcmd_temp = savedcmd;
5910 savedcmd = s;
5911 arch_spin_unlock(&trace_cmdline_lock);
5912 free_saved_cmdlines_buffer(savedcmd_temp);
5913
5914 return 0;
5915}
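/*
 * Note on the resize sequence above: the new saved_cmdlines_buffer is
 * allocated and initialized outside the lock, the global savedcmd pointer
 * is swapped while holding trace_cmdline_lock, and only then is the old
 * buffer freed. Readers that take the lock therefore see either the old
 * or the new buffer, never a partially resized one.
 */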
5916
5917static ssize_t
5918tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5919 size_t cnt, loff_t *ppos)
5920{
5921 unsigned long val;
5922 int ret;
5923
5924 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5925 if (ret)
5926 return ret;
5927
5928	/* must have at least 1 entry and at most PID_MAX_DEFAULT */
5929 if (!val || val > PID_MAX_DEFAULT)
5930 return -EINVAL;
5931
5932 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5933 if (ret < 0)
5934 return ret;
5935
5936 *ppos += cnt;
5937
5938 return cnt;
5939}
5940
5941static const struct file_operations tracing_saved_cmdlines_size_fops = {
5942 .open = tracing_open_generic,
5943 .read = tracing_saved_cmdlines_size_read,
5944 .write = tracing_saved_cmdlines_size_write,
5945};
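/*
 * Usage sketch (illustrative, assuming tracefs is mounted at
 * /sys/kernel/tracing; the numbers are only examples):
 *
 *	# cat saved_cmdlines_size
 *	128
 *	# echo 1024 > saved_cmdlines_size
 *
 * The write handler above rejects values outside [1, PID_MAX_DEFAULT].
 */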
5946
Jeremy Linton681bec02017-05-31 16:56:53 -05005947#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005948static union trace_eval_map_item *
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005949update_eval_map(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005950{
Jeremy Linton00f4b652017-05-31 16:56:43 -05005951 if (!ptr->map.eval_string) {
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005952 if (ptr->tail.next) {
5953 ptr = ptr->tail.next;
5954 /* Set ptr to the next real item (skip head) */
5955 ptr++;
5956 } else
5957 return NULL;
5958 }
5959 return ptr;
5960}
5961
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005962static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005963{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005964 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005965
5966 /*
5967 * Paranoid! If ptr points to end, we don't want to increment past it.
5968 * This really should never happen.
5969 */
Vasily Averin039958a2020-01-24 10:03:01 +03005970 (*pos)++;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005971 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005972 if (WARN_ON_ONCE(!ptr))
5973 return NULL;
5974
5975 ptr++;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005976 ptr = update_eval_map(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005977
5978 return ptr;
5979}
5980
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005981static void *eval_map_start(struct seq_file *m, loff_t *pos)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005982{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005983 union trace_eval_map_item *v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005984 loff_t l = 0;
5985
Jeremy Linton1793ed92017-05-31 16:56:46 -05005986 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005987
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05005988 v = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005989 if (v)
5990 v++;
5991
5992 while (v && l < *pos) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005993 v = eval_map_next(m, v, &l);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04005994 }
5995
5996 return v;
5997}
5998
Jeremy Lintonf57a4142017-05-31 16:56:48 -05005999static void eval_map_stop(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006000{
Jeremy Linton1793ed92017-05-31 16:56:46 -05006001 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006002}
6003
Jeremy Lintonf57a4142017-05-31 16:56:48 -05006004static int eval_map_show(struct seq_file *m, void *v)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006005{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05006006 union trace_eval_map_item *ptr = v;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006007
6008 seq_printf(m, "%s %ld (%s)\n",
Jeremy Linton00f4b652017-05-31 16:56:43 -05006009 ptr->map.eval_string, ptr->map.eval_value,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006010 ptr->map.system);
6011
6012 return 0;
6013}
6014
Jeremy Lintonf57a4142017-05-31 16:56:48 -05006015static const struct seq_operations tracing_eval_map_seq_ops = {
6016 .start = eval_map_start,
6017 .next = eval_map_next,
6018 .stop = eval_map_stop,
6019 .show = eval_map_show,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006020};
6021
Jeremy Lintonf57a4142017-05-31 16:56:48 -05006022static int tracing_eval_map_open(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006023{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006024 int ret;
6025
6026 ret = tracing_check_open_get_tr(NULL);
6027 if (ret)
6028 return ret;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006029
Jeremy Lintonf57a4142017-05-31 16:56:48 -05006030 return seq_open(filp, &tracing_eval_map_seq_ops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006031}
6032
Jeremy Lintonf57a4142017-05-31 16:56:48 -05006033static const struct file_operations tracing_eval_map_fops = {
6034 .open = tracing_eval_map_open,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006035 .read = seq_read,
6036 .llseek = seq_lseek,
6037 .release = seq_release,
6038};
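/*
 * Reader-side note (illustrative addition): with CONFIG_TRACE_EVAL_MAP_FILE
 * enabled, the "eval_map" file created below lists every registered trace
 * eval (enum) mapping as "<name> <value> (<system>)", one entry per line,
 * exactly as formatted by eval_map_show() above.
 */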
6039
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05006040static inline union trace_eval_map_item *
Jeremy Linton5f60b352017-05-31 16:56:47 -05006041trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006042{
6043 /* Return tail of array given the head */
6044 return ptr + ptr->head.length + 1;
6045}
6046
6047static void
Jeremy Lintonf57a4142017-05-31 16:56:48 -05006048trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006049 int len)
6050{
Jeremy Linton00f4b652017-05-31 16:56:43 -05006051 struct trace_eval_map **stop;
6052 struct trace_eval_map **map;
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05006053 union trace_eval_map_item *map_array;
6054 union trace_eval_map_item *ptr;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006055
6056 stop = start + len;
6057
6058 /*
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05006059 * The trace_eval_maps contains the map plus a head and tail item,
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006060 * where the head holds the module and length of array, and the
6061 * tail holds a pointer to the next list.
6062 */
Kees Cook6da2ec52018-06-12 13:55:00 -07006063 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006064 if (!map_array) {
Jeremy Lintonf57a4142017-05-31 16:56:48 -05006065 pr_warn("Unable to allocate trace eval mapping\n");
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006066 return;
6067 }
6068
Jeremy Linton1793ed92017-05-31 16:56:46 -05006069 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006070
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05006071 if (!trace_eval_maps)
6072 trace_eval_maps = map_array;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006073 else {
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05006074 ptr = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006075 for (;;) {
Jeremy Linton5f60b352017-05-31 16:56:47 -05006076 ptr = trace_eval_jmp_to_tail(ptr);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006077 if (!ptr->tail.next)
6078 break;
6079 ptr = ptr->tail.next;
6080
6081 }
6082 ptr->tail.next = map_array;
6083 }
6084 map_array->head.mod = mod;
6085 map_array->head.length = len;
6086 map_array++;
6087
6088 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6089 map_array->map = **map;
6090 map_array++;
6091 }
6092 memset(map_array, 0, sizeof(*map_array));
6093
Jeremy Linton1793ed92017-05-31 16:56:46 -05006094 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006095}
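/*
 * Layout of the map_array built above, where len is the number of maps
 * supplied by the module:
 *
 *	[0]        head: { mod, length = len }
 *	[1..len]   one trace_eval_map copied per entry
 *	[len + 1]  zeroed tail; tail.next links to the next module's array
 *
 * trace_eval_jmp_to_tail() uses head.length to hop straight to the tail
 * when chaining a new array onto the list.
 */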
6096
Jeremy Lintonf57a4142017-05-31 16:56:48 -05006097static void trace_create_eval_file(struct dentry *d_tracer)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006098{
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04006099 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
Jeremy Lintonf57a4142017-05-31 16:56:48 -05006100 NULL, &tracing_eval_map_fops);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006101}
6102
Jeremy Linton681bec02017-05-31 16:56:53 -05006103#else /* CONFIG_TRACE_EVAL_MAP_FILE */
Jeremy Lintonf57a4142017-05-31 16:56:48 -05006104static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6105static inline void trace_insert_eval_map_file(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05006106 struct trace_eval_map **start, int len) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05006107#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006108
Jeremy Lintonf57a4142017-05-31 16:56:48 -05006109static void trace_insert_eval_map(struct module *mod,
Jeremy Linton00f4b652017-05-31 16:56:43 -05006110 struct trace_eval_map **start, int len)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04006111{
Jeremy Linton00f4b652017-05-31 16:56:43 -05006112 struct trace_eval_map **map;
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04006113
6114 if (len <= 0)
6115 return;
6116
6117 map = start;
6118
Jeremy Lintonf57a4142017-05-31 16:56:48 -05006119 trace_event_eval_update(map, len);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006120
Jeremy Lintonf57a4142017-05-31 16:56:48 -05006121 trace_insert_eval_map_file(mod, start, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04006122}
6123
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09006124static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006125tracing_set_trace_read(struct file *filp, char __user *ubuf,
6126 size_t cnt, loff_t *ppos)
6127{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006128 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08006129 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006130 int r;
6131
6132 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006133 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006134 mutex_unlock(&trace_types_lock);
6135
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006136 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006137}
6138
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02006139int tracer_init(struct tracer *t, struct trace_array *tr)
6140{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006141 tracing_reset_online_cpus(&tr->array_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02006142 return t->init(tr);
6143}
6144
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006145static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006146{
6147 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006148
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006149 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006150 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006151}
6152
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006153#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09006154/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006155static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6156 struct array_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09006157{
6158 int cpu, ret = 0;
6159
6160 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6161 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006162 ret = ring_buffer_resize(trace_buf->buffer,
6163 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09006164 if (ret < 0)
6165 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006166 per_cpu_ptr(trace_buf->data, cpu)->entries =
6167 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09006168 }
6169 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006170 ret = ring_buffer_resize(trace_buf->buffer,
6171 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09006172 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006173 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6174 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09006175 }
6176
6177 return ret;
6178}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006179#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09006180
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006181static int __tracing_resize_ring_buffer(struct trace_array *tr,
6182 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04006183{
6184 int ret;
6185
6186 /*
6187	 * If the kernel or the user changes the size of the ring buffer,
Steven Rostedta123c522009-03-12 11:21:08 -04006188 * we use the size that was given, and we can forget about
6189 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04006190 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006191 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04006192
Steven Rostedtb382ede62012-10-10 21:44:34 -04006193 /* May be called before buffers are initialized */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006194 if (!tr->array_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04006195 return 0;
6196
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006197 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04006198 if (ret < 0)
6199 return ret;
6200
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006201#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006202 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
6203 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09006204 goto out;
6205
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006206 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04006207 if (ret < 0) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006208 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6209 &tr->array_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04006210 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04006211 /*
6212 * AARGH! We are left with different
6213 * size max buffer!!!!
6214 * The max buffer is our "snapshot" buffer.
6215 * When a tracer needs a snapshot (one of the
6216 * latency tracers), it swaps the max buffer
6217	 * with the saved snapshot. We succeeded in
6218	 * updating the size of the main buffer, but failed to
6219 * update the size of the max buffer. But when we tried
6220 * to reset the main buffer to the original size, we
6221 * failed there too. This is very unlikely to
6222 * happen, but if it does, warn and kill all
6223 * tracing.
6224 */
Steven Rostedt73c51622009-03-11 13:42:01 -04006225 WARN_ON(1);
6226 tracing_disabled = 1;
6227 }
6228 return ret;
6229 }
6230
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006231 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006232 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006233 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006234 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006235
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09006236 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006237#endif /* CONFIG_TRACER_MAX_TRACE */
6238
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006239 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006240 set_buffer_entries(&tr->array_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006241 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006242 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04006243
6244 return ret;
6245}
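/*
 * Resize ordering above: the main array_buffer is resized first, and only
 * then is the max/snapshot buffer resized when the current tracer uses it.
 * If the snapshot resize fails, the main buffer is put back to its previous
 * size; if even that fails, tracing is disabled rather than left running
 * with mismatched buffer sizes.
 */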
6246
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09006247ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6248 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006249{
Colin Ian King08b0c9b2021-05-13 12:55:17 +01006250 int ret;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006251
6252 mutex_lock(&trace_types_lock);
6253
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006254 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6255 /* make sure, this cpu is enabled in the mask */
6256 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6257 ret = -EINVAL;
6258 goto out;
6259 }
6260 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006261
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006262 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006263 if (ret < 0)
6264 ret = -ENOMEM;
6265
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006266out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006267 mutex_unlock(&trace_types_lock);
6268
6269 return ret;
6270}
6271
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09006272
Steven Rostedt1852fcc2009-03-11 14:33:00 -04006273/**
6274 * tracing_update_buffers - used by tracing facility to expand ring buffers
6275 *
6276	 * To save memory when tracing is configured in but never used, the
6277	 * ring buffers are set to a minimum size. But once a user starts to
6278	 * use the tracing facility, they need to grow to their default
6279	 * size.
6280 *
6281 * This function is to be called when a tracer is about to be used.
6282 */
6283int tracing_update_buffers(void)
6284{
6285 int ret = 0;
6286
Steven Rostedt1027fcb2009-03-12 11:33:20 -04006287 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04006288 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006289 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006290 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04006291 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04006292
6293 return ret;
6294}
6295
Steven Rostedt577b7852009-02-26 23:43:05 -05006296struct trace_option_dentry;
6297
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006298static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006299create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05006300
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05006301/*
6302 * Used to clear out the tracer before deletion of an instance.
6303 * Must have trace_types_lock held.
6304 */
6305static void tracing_set_nop(struct trace_array *tr)
6306{
6307 if (tr->current_trace == &nop_trace)
6308 return;
6309
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05006310 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05006311
6312 if (tr->current_trace->reset)
6313 tr->current_trace->reset(tr);
6314
6315 tr->current_trace = &nop_trace;
6316}
6317
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04006318static void add_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006319{
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05006320 /* Only enable if the directory has been created already. */
6321 if (!tr->dir)
6322 return;
6323
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006324 create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05006325}
6326
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09006327int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05006328{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006329 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006330#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05006331 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006332#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01006333 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006334
Steven Rostedt1027fcb2009-03-12 11:33:20 -04006335 mutex_lock(&trace_types_lock);
6336
Steven Rostedt73c51622009-03-11 13:42:01 -04006337 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006338 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006339 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04006340 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01006341 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04006342 ret = 0;
6343 }
6344
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006345 for (t = trace_types; t; t = t->next) {
6346 if (strcmp(t->name, buf) == 0)
6347 break;
6348 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02006349 if (!t) {
6350 ret = -EINVAL;
6351 goto out;
6352 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006353 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006354 goto out;
6355
Tom Zanussia35873a2019-02-13 17:42:45 -06006356#ifdef CONFIG_TRACER_SNAPSHOT
6357 if (t->use_max_tr) {
6358 arch_spin_lock(&tr->max_lock);
6359 if (tr->cond_snapshot)
6360 ret = -EBUSY;
6361 arch_spin_unlock(&tr->max_lock);
6362 if (ret)
6363 goto out;
6364 }
6365#endif
Ziqian SUN (Zamir)c7b3ae02017-09-11 14:26:35 +08006366	/* Some tracers won't work when set from the kernel command line */
6367 if (system_state < SYSTEM_RUNNING && t->noboot) {
6368 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6369 t->name);
6370 goto out;
6371 }
6372
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05006373 /* Some tracers are only allowed for the top level buffer */
6374 if (!trace_ok_for_array(t, tr)) {
6375 ret = -EINVAL;
6376 goto out;
6377 }
6378
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006379 /* If trace pipe files are being read, we can't change the tracer */
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04006380 if (tr->trace_ref) {
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006381 ret = -EBUSY;
6382 goto out;
6383 }
6384
Steven Rostedt9f029e82008-11-12 15:24:24 -05006385 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04006386
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05006387 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04006388
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006389 if (tr->current_trace->reset)
6390 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05006391
Paul E. McKenney74401722018-11-06 18:44:52 -08006392 /* Current trace needs to be nop_trace before synchronize_rcu */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006393 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05006394
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05006395#ifdef CONFIG_TRACER_MAX_TRACE
6396 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05006397
6398 if (had_max_tr && !t->use_max_tr) {
6399 /*
6400 * We need to make sure that the update_max_tr sees that
6401 * current_trace changed to nop_trace to keep it from
6402 * swapping the buffers after we resize it.
6403	 * The update_max_tr is called with interrupts disabled,
6404	 * so a synchronize_rcu() is sufficient.
6405 */
Paul E. McKenney74401722018-11-06 18:44:52 -08006406 synchronize_rcu();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04006407 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09006408 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006409#endif
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006410
6411#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05006412 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04006413 ret = tracing_alloc_snapshot_instance(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09006414 if (ret < 0)
6415 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09006416 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006417#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05006418
Frederic Weisbecker1c800252008-11-16 05:57:26 +01006419 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02006420 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01006421 if (ret)
6422 goto out;
6423 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006424
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006425 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05006426 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05006427 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006428 out:
6429 mutex_unlock(&trace_types_lock);
6430
Peter Zijlstrad9e54072008-11-01 19:57:37 +01006431 return ret;
6432}
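/*
 * Summary of the switch sequence above: the old tracer is disabled and
 * reset, current_trace is set to nop_trace before the RCU-synchronized
 * teardown of a no-longer-needed snapshot buffer, a snapshot buffer is
 * allocated if the new tracer requires one, and only after a successful
 * ->init() does the new tracer become current and get re-enabled.
 */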
6433
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006434static ssize_t
6435tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6436 size_t cnt, loff_t *ppos)
6437{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05006438 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08006439 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006440 int i;
6441 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01006442 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006443
Steven Rostedt60063a62008-10-28 10:44:24 -04006444 ret = cnt;
6445
Li Zefanee6c2c12009-09-18 14:06:47 +08006446 if (cnt > MAX_TRACER_SIZE)
6447 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006448
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08006449 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006450 return -EFAULT;
6451
6452 buf[cnt] = 0;
6453
6454 /* strip ending whitespace. */
6455 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6456 buf[i] = 0;
6457
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05006458 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01006459 if (err)
6460 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006461
Jiri Olsacf8517c2009-10-23 19:36:16 -04006462 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006463
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02006464 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006465}
6466
6467static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006468tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6469 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006470{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006471 char buf[64];
6472 int r;
6473
Steven Rostedtcffae432008-05-12 21:21:00 +02006474 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006475 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02006476 if (r > sizeof(buf))
6477 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006478 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006479}
6480
6481static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006482tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6483 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006484{
Hannes Eder5e398412009-02-10 19:44:34 +01006485 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006486 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006487
Peter Huewe22fe9b52011-06-07 21:58:27 +02006488 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6489 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006490 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006491
6492 *ptr = val * 1000;
6493
6494 return cnt;
6495}
6496
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006497static ssize_t
6498tracing_thresh_read(struct file *filp, char __user *ubuf,
6499 size_t cnt, loff_t *ppos)
6500{
6501 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6502}
6503
6504static ssize_t
6505tracing_thresh_write(struct file *filp, const char __user *ubuf,
6506 size_t cnt, loff_t *ppos)
6507{
6508 struct trace_array *tr = filp->private_data;
6509 int ret;
6510
6511 mutex_lock(&trace_types_lock);
6512 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6513 if (ret < 0)
6514 goto out;
6515
6516 if (tr->current_trace->update_thresh) {
6517 ret = tr->current_trace->update_thresh(tr);
6518 if (ret < 0)
6519 goto out;
6520 }
6521
6522 ret = cnt;
6523out:
6524 mutex_unlock(&trace_types_lock);
6525
6526 return ret;
6527}
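/*
 * Note on units: tracing_thresh is exposed to user space in microseconds
 * (the read side converts with nsecs_to_usecs() and the write side
 * multiplies by 1000), while the stored value is in nanoseconds.
 */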
6528
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04006529#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Chen Gange428abb2015-11-10 05:15:15 +08006530
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006531static ssize_t
6532tracing_max_lat_read(struct file *filp, char __user *ubuf,
6533 size_t cnt, loff_t *ppos)
6534{
6535 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6536}
6537
6538static ssize_t
6539tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6540 size_t cnt, loff_t *ppos)
6541{
6542 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6543}
6544
Chen Gange428abb2015-11-10 05:15:15 +08006545#endif
6546
Steven Rostedtb3806b42008-05-12 21:20:46 +02006547static int tracing_open_pipe(struct inode *inode, struct file *filp)
6548{
Oleg Nesterov15544202013-07-23 17:25:57 +02006549 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006550 struct trace_iterator *iter;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006551 int ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006552
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04006553 ret = tracing_check_open_get_tr(tr);
6554 if (ret)
6555 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006556
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006557 mutex_lock(&trace_types_lock);
6558
Steven Rostedtb3806b42008-05-12 21:20:46 +02006559 /* create a buffer to store the information to pass to userspace */
6560 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006561 if (!iter) {
6562 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07006563 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006564 goto out;
6565 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02006566
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04006567 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006568 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006569
6570 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6571 ret = -ENOMEM;
6572 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10306573 }
6574
Steven Rostedta3097202008-11-07 22:36:02 -05006575 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10306576 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05006577
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006578 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04006579 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6580
David Sharp8be07092012-11-13 12:18:22 -08006581 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09006582 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08006583 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6584
Oleg Nesterov15544202013-07-23 17:25:57 +02006585 iter->tr = tr;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006586 iter->array_buffer = &tr->array_buffer;
Oleg Nesterov15544202013-07-23 17:25:57 +02006587 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006588 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006589 filp->private_data = iter;
6590
Steven Rostedt107bad82008-05-12 21:21:01 +02006591 if (iter->trace->pipe_open)
6592 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02006593
Arnd Bergmannb4447862010-07-07 23:40:11 +02006594 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006595
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04006596 tr->trace_ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006597out:
6598 mutex_unlock(&trace_types_lock);
6599 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006600
6601fail:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006602 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006603 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006604 mutex_unlock(&trace_types_lock);
6605 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006606}
6607
6608static int tracing_release_pipe(struct inode *inode, struct file *file)
6609{
6610 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02006611 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006612
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006613 mutex_lock(&trace_types_lock);
6614
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04006615 tr->trace_ref--;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05006616
Steven Rostedt29bf4a52009-12-09 12:37:43 -05006617 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05006618 iter->trace->pipe_close(iter);
6619
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006620 mutex_unlock(&trace_types_lock);
6621
Rusty Russell44623442009-01-01 10:12:23 +10306622 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006623 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006624 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006625
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006626 trace_array_put(tr);
6627
Steven Rostedtb3806b42008-05-12 21:20:46 +02006628 return 0;
6629}
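/*
 * The tr->trace_ref counting in the open/release handlers above is what
 * makes tracing_set_tracer() return -EBUSY while a trace_pipe reader is
 * active: the current tracer cannot be switched while the pipe is open.
 */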
6630
Al Viro9dd95742017-07-03 00:42:43 -04006631static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006632trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006633{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006634 struct trace_array *tr = iter->tr;
6635
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006636 /* Iterators are static, they should be filled or empty */
6637 if (trace_buffer_iter(iter, iter->cpu_file))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08006638 return EPOLLIN | EPOLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006639
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006640 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006641 /*
6642 * Always select as readable when in blocking mode
6643 */
Linus Torvaldsa9a08842018-02-11 14:34:03 -08006644 return EPOLLIN | EPOLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006645 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006646 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05006647 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006648}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006649
Al Viro9dd95742017-07-03 00:42:43 -04006650static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05006651tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6652{
6653 struct trace_iterator *iter = filp->private_data;
6654
6655 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02006656}
6657
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05006658/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006659static int tracing_wait_pipe(struct file *filp)
6660{
6661 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006662 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006663
6664 while (trace_empty(iter)) {
6665
6666 if ((filp->f_flags & O_NONBLOCK)) {
6667 return -EAGAIN;
6668 }
6669
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006670 /*
Liu Bo250bfd32013-01-14 10:54:11 +08006671		 * We block until we have read something and tracing is disabled.
6672		 * We still block while tracing is disabled if we have never
6673 * read anything. This allows a user to cat this file, and
6674 * then enable tracing. But after we have read something,
6675 * we give an EOF when tracing is again disabled.
6676 *
6677 * iter->pos will be 0 if we haven't read anything.
6678 */
Tahsin Erdogan75df6e62017-09-17 03:23:48 -07006679 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006680 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04006681
6682 mutex_unlock(&iter->mutex);
6683
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05006684 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04006685
6686 mutex_lock(&iter->mutex);
6687
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04006688 if (ret)
6689 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006690 }
6691
6692 return 1;
6693}
6694
Steven Rostedtb3806b42008-05-12 21:20:46 +02006695/*
6696 * Consumer reader.
6697 */
6698static ssize_t
6699tracing_read_pipe(struct file *filp, char __user *ubuf,
6700 size_t cnt, loff_t *ppos)
6701{
6702 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006703 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006704
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006705 /*
6706 * Avoid more than one consumer on a single file descriptor
6707	 * This is just a matter of trace coherency; the ring buffer itself
6708 * is protected.
6709 */
6710 mutex_lock(&iter->mutex);
Steven Rostedt (Red Hat)12458002016-09-23 22:57:13 -04006711
6712 /* return any leftover data */
6713 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6714 if (sret != -EBUSY)
6715 goto out;
6716
6717 trace_seq_init(&iter->seq);
6718
Steven Rostedt107bad82008-05-12 21:21:01 +02006719 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006720 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6721 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02006722 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02006723 }
6724
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006725waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006726 sret = tracing_wait_pipe(filp);
6727 if (sret <= 0)
6728 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006729
6730 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006731 if (trace_empty(iter)) {
6732 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02006733 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02006734 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02006735
6736 if (cnt >= PAGE_SIZE)
6737 cnt = PAGE_SIZE - 1;
6738
Steven Rostedt53d0aa72008-05-12 21:21:01 +02006739 /* reset all but tr, trace, and overruns */
Steven Rostedt (VMware)2768c1e2021-12-10 20:26:16 -05006740 trace_iterator_reset(iter);
Andrew Vagined5467d2013-08-02 21:16:43 +04006741 cpumask_clear(iter->started);
Petr Mladekd303de12019-10-11 16:21:34 +02006742 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006743
Lai Jiangshan4f535962009-05-18 19:35:34 +08006744 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006745 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05006746 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006747 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006748 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006749
Ingo Molnarf9896bf2008-05-12 21:20:47 +02006750 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02006751 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02006752 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006753 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006754 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02006755 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01006756 if (ret != TRACE_TYPE_NO_CONSUME)
6757 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006758
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006759 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02006760 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01006761
6762 /*
6763 * Setting the full flag means we reached the trace_seq buffer
6764	 * size and we should have left via the partial-output condition above.
6765 * One of the trace_seq_* functions is not used properly.
6766 */
6767 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6768 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02006769 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006770 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006771 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02006772
Steven Rostedtb3806b42008-05-12 21:20:46 +02006773 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006774 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006775 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05006776 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006777
6778 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006779	 * If there was nothing to send to the user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006780 * entries, go back to wait for more entries.
6781 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006782 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02006783 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006784
Steven Rostedt107bad82008-05-12 21:21:01 +02006785out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006786 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02006787
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02006788 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02006789}
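/*
 * Usage sketch (illustrative, path assumed): unlike the "trace" file,
 * reads from "trace_pipe" are consuming and block until data is
 * available:
 *
 *	# cat /sys/kernel/tracing/trace_pipe
 *
 * Events are removed from the ring buffer as they are printed.
 */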
6790
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006791static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6792 unsigned int idx)
6793{
6794 __free_page(spd->pages[idx]);
6795}
6796
Steven Rostedt34cd4992009-02-09 12:06:29 -05006797static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006798tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006799{
6800 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006801 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006802 int ret;
6803
6804 /* Seq buffer is page-sized, exactly what we need. */
6805 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006806 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006807 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006808
6809 if (trace_seq_has_overflowed(&iter->seq)) {
6810 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006811 break;
6812 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006813
6814 /*
6815 * This should not be hit, because it should only
6816 * be set if the iter->seq overflowed. But check it
6817 * anyway to be safe.
6818 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05006819 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006820 iter->seq.seq.len = save_len;
6821 break;
6822 }
6823
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006824 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05006825 if (rem < count) {
6826 rem = 0;
6827 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006828 break;
6829 }
6830
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08006831 if (ret != TRACE_TYPE_NO_CONSUME)
6832 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05006833 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05006834 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05006835 rem = 0;
6836 iter->ent = NULL;
6837 break;
6838 }
6839 }
6840
6841 return rem;
6842}
6843
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006844static ssize_t tracing_splice_read_pipe(struct file *filp,
6845 loff_t *ppos,
6846 struct pipe_inode_info *pipe,
6847 size_t len,
6848 unsigned int flags)
6849{
Jens Axboe35f3d142010-05-20 10:43:18 +02006850 struct page *pages_def[PIPE_DEF_BUFFERS];
6851 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006852 struct trace_iterator *iter = filp->private_data;
6853 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02006854 .pages = pages_def,
6855 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006856 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02006857 .nr_pages_max = PIPE_DEF_BUFFERS,
Christoph Hellwig6797d972020-05-20 17:58:13 +02006858 .ops = &default_pipe_buf_ops,
Steven Rostedt34cd4992009-02-09 12:06:29 -05006859 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006860 };
6861 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006862 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006863 unsigned int i;
6864
Jens Axboe35f3d142010-05-20 10:43:18 +02006865 if (splice_grow_spd(pipe, &spd))
6866 return -ENOMEM;
6867
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006868 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006869
6870 if (iter->trace->splice_read) {
6871 ret = iter->trace->splice_read(iter, filp,
6872 ppos, pipe, len, flags);
6873 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006874 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006875 }
6876
6877 ret = tracing_wait_pipe(filp);
6878 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05006879 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006880
Jason Wessel955b61e2010-08-05 09:22:23 -05006881 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006882 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05006883 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006884 }
6885
Lai Jiangshan4f535962009-05-18 19:35:34 +08006886 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006887 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006888
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006889 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04006890 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006891 spd.pages[i] = alloc_page(GFP_KERNEL);
6892 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05006893 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006894
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01006895 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006896
6897 /* Copy the data into the page, so we can start over. */
6898 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02006899 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006900 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006901 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02006902 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006903 break;
6904 }
Jens Axboe35f3d142010-05-20 10:43:18 +02006905 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006906 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006907
Steven Rostedtf9520752009-03-02 14:04:40 -05006908 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006909 }
6910
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006911 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08006912 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006913 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006914
6915 spd.nr_pages = i;
6916
Steven Rostedt (Red Hat)a29054d92016-03-18 15:46:48 -04006917 if (i)
6918 ret = splice_to_pipe(pipe, &spd);
6919 else
6920 ret = 0;
Jens Axboe35f3d142010-05-20 10:43:18 +02006921out:
Eric Dumazet047fe362012-06-12 15:24:40 +02006922 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02006923 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006924
Steven Rostedt34cd4992009-02-09 12:06:29 -05006925out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01006926 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02006927 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02006928}
6929
Steven Rostedta98a3c32008-05-12 21:20:59 +02006930static ssize_t
6931tracing_entries_read(struct file *filp, char __user *ubuf,
6932 size_t cnt, loff_t *ppos)
6933{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006934 struct inode *inode = file_inode(filp);
6935 struct trace_array *tr = inode->i_private;
6936 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006937 char buf[64];
6938 int r = 0;
6939 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006940
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006941 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006942
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006943 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006944 int cpu, buf_size_same;
6945 unsigned long size;
6946
6947 size = 0;
6948 buf_size_same = 1;
6949 /* check if all cpu sizes are same */
6950 for_each_tracing_cpu(cpu) {
6951 /* fill in the size from first enabled cpu */
6952 if (size == 0)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006953 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6954 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006955 buf_size_same = 0;
6956 break;
6957 }
6958 }
6959
6960 if (buf_size_same) {
6961 if (!ring_buffer_expanded)
6962 r = sprintf(buf, "%lu (expanded: %lu)\n",
6963 size >> 10,
6964 trace_buf_size >> 10);
6965 else
6966 r = sprintf(buf, "%lu\n", size >> 10);
6967 } else
6968 r = sprintf(buf, "X\n");
6969 } else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05006970 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006971
Steven Rostedtdb526ca2009-03-12 13:53:25 -04006972 mutex_unlock(&trace_types_lock);
6973
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006974 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6975 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006976}
6977
6978static ssize_t
6979tracing_entries_write(struct file *filp, const char __user *ubuf,
6980 size_t cnt, loff_t *ppos)
6981{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006982 struct inode *inode = file_inode(filp);
6983 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006984 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006985 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006986
Peter Huewe22fe9b52011-06-07 21:58:27 +02006987 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6988 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02006989 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02006990
6991 /* must have at least 1 entry */
6992 if (!val)
6993 return -EINVAL;
6994
Steven Rostedt1696b2b2008-11-13 00:09:35 -05006995 /* value is in KB */
6996 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006997 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07006998 if (ret < 0)
6999 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02007000
Jiri Olsacf8517c2009-10-23 19:36:16 -04007001 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02007002
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007003 return cnt;
7004}
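/*
 * Usage sketch (illustrative; the file names are the usual tracefs ones
 * and are an assumption here): this write handler backs the per-instance
 * "buffer_size_kb" files and takes a size in KiB:
 *
 *	# echo 4096 > buffer_size_kb			(all CPUs)
 *	# echo 4096 > per_cpu/cpu1/buffer_size_kb	(one CPU)
 *
 * The handler itself only sees the CPU (or RING_BUFFER_ALL_CPUS) resolved
 * from the inode by tracing_get_cpu().
 */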
Steven Rostedtbf5e6512008-11-10 21:46:00 -05007005
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007006static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007007tracing_total_entries_read(struct file *filp, char __user *ubuf,
7008 size_t cnt, loff_t *ppos)
7009{
7010 struct trace_array *tr = filp->private_data;
7011 char buf[64];
7012 int r, cpu;
7013 unsigned long size = 0, expanded_size = 0;
7014
7015 mutex_lock(&trace_types_lock);
7016 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007017 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007018 if (!ring_buffer_expanded)
7019 expanded_size += trace_buf_size >> 10;
7020 }
7021 if (ring_buffer_expanded)
7022 r = sprintf(buf, "%lu\n", size);
7023 else
7024 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
7025 mutex_unlock(&trace_types_lock);
7026
7027 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7028}
7029
7030static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007031tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7032 size_t cnt, loff_t *ppos)
7033{
7034 /*
7035	 * There is no need to read what the user has written; this function
7036 * is just to make sure that there is no error when "echo" is used
7037 */
7038
7039 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02007040
7041 return cnt;
7042}
7043
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007044static int
7045tracing_free_buffer_release(struct inode *inode, struct file *filp)
7046{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007047 struct trace_array *tr = inode->i_private;
7048
Steven Rostedtcf30cf62011-06-14 22:44:07 -04007049 /* disable tracing ? */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007050 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07007051 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007052 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007053 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007054
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007055 trace_array_put(tr);
7056
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007057 return 0;
7058}
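/*
 * Note: the release handler above backs the "free_buffer" file (name
 * assumed here). On close, tracing is first turned off if the
 * stop-on-free trace option is set, and the ring buffer is then shrunk
 * to zero entries.
 */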
7059
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007060static ssize_t
7061tracing_mark_write(struct file *filp, const char __user *ubuf,
7062 size_t cnt, loff_t *fpos)
7063{
Alexander Z Lam2d716192013-07-01 15:31:24 -07007064 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04007065 struct ring_buffer_event *event;
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04007066 enum event_trigger_type tt = ETT_NONE;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05007067 struct trace_buffer *buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04007068 struct print_entry *entry;
Steven Rostedtd696b582011-09-22 11:50:27 -04007069 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04007070 int size;
7071 int len;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007072
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007073/* Used in tracing_mark_raw_write() as well */
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01007074#define FAULTED_STR "<faulted>"
7075#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007076
Steven Rostedtc76f0692008-11-07 22:36:02 -05007077 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007078 return -EINVAL;
7079
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007080 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07007081 return -EINVAL;
7082
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007083 if (cnt > TRACE_BUF_SIZE)
7084 cnt = TRACE_BUF_SIZE;
7085
Steven Rostedtd696b582011-09-22 11:50:27 -04007086 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007087
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007088 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7089
7090	/* If cnt is shorter than "<faulted>", make sure we can still add that string */
7091 if (cnt < FAULTED_SIZE)
7092 size += FAULTED_SIZE - cnt;
7093
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007094 buffer = tr->array_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05007095 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01007096 tracing_gen_ctx());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007097 if (unlikely(!event))
Steven Rostedtd696b582011-09-22 11:50:27 -04007098 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007099 return -EBADF;
Steven Rostedtd696b582011-09-22 11:50:27 -04007100
7101 entry = ring_buffer_event_data(event);
7102 entry->ip = _THIS_IP_;
7103
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007104 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7105 if (len) {
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01007106 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007107 cnt = FAULTED_SIZE;
7108 written = -EFAULT;
Steven Rostedtd696b582011-09-22 11:50:27 -04007109 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007110 written = cnt;
Steven Rostedtd696b582011-09-22 11:50:27 -04007111
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04007112 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7113 /* do not add \n before testing triggers, but add \0 */
7114 entry->buf[cnt] = '\0';
Steven Rostedt (VMware)b47e3302021-03-16 12:41:03 -04007115 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04007116 }
7117
Steven Rostedtd696b582011-09-22 11:50:27 -04007118 if (entry->buf[cnt - 1] != '\n') {
7119 entry->buf[cnt] = '\n';
7120 entry->buf[cnt + 1] = '\0';
7121 } else
7122 entry->buf[cnt] = '\0';
7123
Tingwei Zhang458999c2020-10-05 10:13:15 +03007124 if (static_branch_unlikely(&trace_marker_exports_enabled))
7125 ftrace_exports(event, TRACE_EXPORT_MARKER);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04007126 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04007127
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04007128 if (tt)
7129 event_triggers_post_call(tr->trace_marker_file, tt);
7130
Steven Rostedtfa32e852016-07-06 15:25:08 -04007131 return written;
7132}
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007133
Steven Rostedtfa32e852016-07-06 15:25:08 -04007134/* Limit it for now to 3K (including tag) */
7135#define RAW_DATA_MAX_SIZE (1024*3)
7136
7137static ssize_t
7138tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7139 size_t cnt, loff_t *fpos)
7140{
7141 struct trace_array *tr = filp->private_data;
7142 struct ring_buffer_event *event;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05007143 struct trace_buffer *buffer;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007144 struct raw_data_entry *entry;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007145 ssize_t written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007146 int size;
7147 int len;
7148
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007149#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7150
Steven Rostedtfa32e852016-07-06 15:25:08 -04007151 if (tracing_disabled)
7152 return -EINVAL;
7153
7154 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7155 return -EINVAL;
7156
7157 /* The marker must at least have a tag id */
7158 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7159 return -EINVAL;
7160
7161 if (cnt > TRACE_BUF_SIZE)
7162 cnt = TRACE_BUF_SIZE;
7163
7164 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7165
Steven Rostedtfa32e852016-07-06 15:25:08 -04007166 size = sizeof(*entry) + cnt;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007167 if (cnt < FAULT_SIZE_ID)
7168 size += FAULT_SIZE_ID - cnt;
7169
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007170 buffer = tr->array_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05007171 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01007172 tracing_gen_ctx());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007173 if (!event)
Steven Rostedtfa32e852016-07-06 15:25:08 -04007174 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007175 return -EBADF;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007176
7177 entry = ring_buffer_event_data(event);
7178
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007179 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7180 if (len) {
7181 entry->id = -1;
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01007182 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007183 written = -EFAULT;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007184 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007185 written = cnt;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007186
7187 __buffer_unlock_commit(buffer, event);
7188
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02007189 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007190}
7191
Li Zefan13f16d22009-12-08 11:16:11 +08007192static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08007193{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007194 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08007195 int i;
7196
7197 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08007198 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08007199 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007200 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7201 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08007202 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08007203
Li Zefan13f16d22009-12-08 11:16:11 +08007204 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08007205}
7206
Tom Zanussid71bd342018-01-15 20:52:07 -06007207int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08007208{
Zhaolei5079f322009-08-25 16:12:56 +08007209 int i;
7210
Zhaolei5079f322009-08-25 16:12:56 +08007211 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7212 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7213 break;
7214 }
7215 if (i == ARRAY_SIZE(trace_clocks))
7216 return -EINVAL;
7217
Zhaolei5079f322009-08-25 16:12:56 +08007218 mutex_lock(&trace_types_lock);
7219
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007220 tr->clock_id = i;
7221
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007222 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08007223
David Sharp60303ed2012-10-11 16:27:52 -07007224 /*
7225 * New clock may not be consistent with the previous clock.
7226 * Reset the buffer so that it doesn't have incomparable timestamps.
7227 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007228 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007229
7230#ifdef CONFIG_TRACER_MAX_TRACE
Baohong Liu170b3b12017-09-05 16:57:19 -05007231 if (tr->max_buffer.buffer)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007232 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07007233 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007234#endif
David Sharp60303ed2012-10-11 16:27:52 -07007235
Zhaolei5079f322009-08-25 16:12:56 +08007236 mutex_unlock(&trace_types_lock);
7237
Steven Rostedte1e232c2014-02-10 23:38:46 -05007238 return 0;
7239}
7240
7241static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7242 size_t cnt, loff_t *fpos)
7243{
7244 struct seq_file *m = filp->private_data;
7245 struct trace_array *tr = m->private;
7246 char buf[64];
7247 const char *clockstr;
7248 int ret;
7249
7250 if (cnt >= sizeof(buf))
7251 return -EINVAL;
7252
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08007253 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05007254 return -EFAULT;
7255
7256 buf[cnt] = 0;
7257
7258 clockstr = strstrip(buf);
7259
7260 ret = tracing_set_clock(tr, clockstr);
7261 if (ret)
7262 return ret;
7263
Zhaolei5079f322009-08-25 16:12:56 +08007264 *fpos += cnt;
7265
7266 return cnt;
7267}
7268
Li Zefan13f16d22009-12-08 11:16:11 +08007269static int tracing_clock_open(struct inode *inode, struct file *file)
7270{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007271 struct trace_array *tr = inode->i_private;
7272 int ret;
7273
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007274 ret = tracing_check_open_get_tr(tr);
7275 if (ret)
7276 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007277
7278 ret = single_open(file, tracing_clock_show, inode->i_private);
7279 if (ret < 0)
7280 trace_array_put(tr);
7281
7282 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08007283}
7284
Tom Zanussi2c1ea602018-01-15 20:51:41 -06007285static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7286{
7287 struct trace_array *tr = m->private;
7288
7289 mutex_lock(&trace_types_lock);
7290
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007291 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
Tom Zanussi2c1ea602018-01-15 20:51:41 -06007292 seq_puts(m, "delta [absolute]\n");
7293 else
7294 seq_puts(m, "[delta] absolute\n");
7295
7296 mutex_unlock(&trace_types_lock);
7297
7298 return 0;
7299}
7300
7301static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7302{
7303 struct trace_array *tr = inode->i_private;
7304 int ret;
7305
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007306 ret = tracing_check_open_get_tr(tr);
7307 if (ret)
7308 return ret;
Tom Zanussi2c1ea602018-01-15 20:51:41 -06007309
7310 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7311 if (ret < 0)
7312 trace_array_put(tr);
7313
7314 return ret;
7315}
7316
Steven Rostedt (VMware)d8279bfc2021-03-16 12:41:07 -04007317u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7318{
7319 if (rbe == this_cpu_read(trace_buffered_event))
Yordan Karadzhov (VMware)f3ef7202021-03-29 16:03:31 +03007320 return ring_buffer_time_stamp(buffer);
Steven Rostedt (VMware)d8279bfc2021-03-16 12:41:07 -04007321
7322 return ring_buffer_event_time_stamp(buffer, rbe);
7323}
7324
Steven Rostedt (VMware)b94bc802021-03-16 12:41:05 -04007325/*
7326	 * Enable or disable use of the per-CPU trace_buffered_event when possible.
7327 */
7328int tracing_set_filter_buffering(struct trace_array *tr, bool set)
Tom Zanussi00b41452018-01-15 20:51:39 -06007329{
7330 int ret = 0;
7331
7332 mutex_lock(&trace_types_lock);
7333
Steven Rostedt (VMware)b94bc802021-03-16 12:41:05 -04007334 if (set && tr->no_filter_buffering_ref++)
Tom Zanussi00b41452018-01-15 20:51:39 -06007335 goto out;
7336
Steven Rostedt (VMware)b94bc802021-03-16 12:41:05 -04007337 if (!set) {
7338 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
Tom Zanussi00b41452018-01-15 20:51:39 -06007339 ret = -EINVAL;
7340 goto out;
7341 }
7342
Steven Rostedt (VMware)b94bc802021-03-16 12:41:05 -04007343 --tr->no_filter_buffering_ref;
Tom Zanussi00b41452018-01-15 20:51:39 -06007344 }
Tom Zanussi00b41452018-01-15 20:51:39 -06007345 out:
7346 mutex_unlock(&trace_types_lock);
7347
7348 return ret;
7349}
7350
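/*
 * Usage sketch (illustrative, not part of the original source): a caller
 * that needs its events committed straight to the ring buffer, bypassing
 * the per-CPU trace_buffered_event, takes a reference with set == true and
 * drops it with set == false once it is done.  The pairing below is an
 * assumed pattern derived from the refcounting above, not a specific
 * in-tree caller.
 *
 *	err = tracing_set_filter_buffering(tr, true);
 *	if (err)
 *		return err;
 *	... generate and read events that need unbuffered commits ...
 *	tracing_set_filter_buffering(tr, false);
 */
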
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007351struct ftrace_buffer_info {
7352 struct trace_iterator iter;
7353 void *spare;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007354 unsigned int spare_cpu;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007355 unsigned int read;
7356};
7357
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007358#ifdef CONFIG_TRACER_SNAPSHOT
7359static int tracing_snapshot_open(struct inode *inode, struct file *file)
7360{
Oleg Nesterov6484c712013-07-23 17:26:10 +02007361 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007362 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007363 struct seq_file *m;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007364 int ret;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007365
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007366 ret = tracing_check_open_get_tr(tr);
7367 if (ret)
7368 return ret;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007369
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007370 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02007371 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007372 if (IS_ERR(iter))
7373 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007374 } else {
7375 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07007376 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007377 m = kzalloc(sizeof(*m), GFP_KERNEL);
7378 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07007379 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007380 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7381 if (!iter) {
7382 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07007383 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007384 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07007385 ret = 0;
7386
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007387 iter->tr = tr;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007388 iter->array_buffer = &tr->max_buffer;
Oleg Nesterov6484c712013-07-23 17:26:10 +02007389 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007390 m->private = iter;
7391 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007392 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07007393out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007394 if (ret < 0)
7395 trace_array_put(tr);
7396
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007397 return ret;
7398}
7399
7400static ssize_t
7401tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7402 loff_t *ppos)
7403{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007404 struct seq_file *m = filp->private_data;
7405 struct trace_iterator *iter = m->private;
7406 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007407 unsigned long val;
7408 int ret;
7409
7410 ret = tracing_update_buffers();
7411 if (ret < 0)
7412 return ret;
7413
7414 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7415 if (ret)
7416 return ret;
7417
7418 mutex_lock(&trace_types_lock);
7419
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007420 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007421 ret = -EBUSY;
7422 goto out;
7423 }
7424
Tom Zanussia35873a2019-02-13 17:42:45 -06007425 arch_spin_lock(&tr->max_lock);
7426 if (tr->cond_snapshot)
7427 ret = -EBUSY;
7428 arch_spin_unlock(&tr->max_lock);
7429 if (ret)
7430 goto out;
7431
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007432 switch (val) {
7433 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007434 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7435 ret = -EINVAL;
7436 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007437 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04007438 if (tr->allocated_snapshot)
7439 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007440 break;
7441 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007442/* Only allow per-cpu swap if the ring buffer supports it */
7443#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7444 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7445 ret = -EINVAL;
7446 break;
7447 }
7448#endif
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09007449 if (tr->allocated_snapshot)
7450 ret = resize_buffer_duplicate_size(&tr->max_buffer,
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007451 &tr->array_buffer, iter->cpu_file);
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09007452 else
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04007453 ret = tracing_alloc_snapshot_instance(tr);
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09007454 if (ret < 0)
7455 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007456 local_irq_disable();
7457 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007458 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Tom Zanussia35873a2019-02-13 17:42:45 -06007459 update_max_tr(tr, current, smp_processor_id(), NULL);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007460 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05007461 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007462 local_irq_enable();
7463 break;
7464 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05007465 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007466 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7467 tracing_reset_online_cpus(&tr->max_buffer);
7468 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04007469 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007470 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007471 break;
7472 }
7473
7474 if (ret >= 0) {
7475 *ppos += cnt;
7476 ret = cnt;
7477 }
7478out:
7479 mutex_unlock(&trace_types_lock);
7480 return ret;
7481}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007482
7483static int tracing_snapshot_release(struct inode *inode, struct file *file)
7484{
7485 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007486 int ret;
7487
7488 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007489
7490 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007491 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007492
7493 /* If write only, the seq_file is just a stub */
7494 if (m)
7495 kfree(m->private);
7496 kfree(m);
7497
7498 return 0;
7499}
7500
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007501static int tracing_buffers_open(struct inode *inode, struct file *filp);
7502static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7503 size_t count, loff_t *ppos);
7504static int tracing_buffers_release(struct inode *inode, struct file *file);
7505static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7506 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7507
7508static int snapshot_raw_open(struct inode *inode, struct file *filp)
7509{
7510 struct ftrace_buffer_info *info;
7511 int ret;
7512
Steven Rostedt (VMware)17911ff2019-10-11 17:22:50 -04007513 /* The following checks for tracefs lockdown */
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007514 ret = tracing_buffers_open(inode, filp);
7515 if (ret < 0)
7516 return ret;
7517
7518 info = filp->private_data;
7519
7520 if (info->iter.trace->use_max_tr) {
7521 tracing_buffers_release(inode, filp);
7522 return -EBUSY;
7523 }
7524
7525 info->iter.snapshot = true;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007526 info->iter.array_buffer = &info->iter.tr->max_buffer;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007527
7528 return ret;
7529}
7530
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007531#endif /* CONFIG_TRACER_SNAPSHOT */
7532
7533
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04007534static const struct file_operations tracing_thresh_fops = {
7535 .open = tracing_open_generic,
7536 .read = tracing_thresh_read,
7537 .write = tracing_thresh_write,
7538 .llseek = generic_file_llseek,
7539};
7540
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04007541#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007542static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007543 .open = tracing_open_generic,
7544 .read = tracing_max_lat_read,
7545 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007546 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007547};
Chen Gange428abb2015-11-10 05:15:15 +08007548#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007549
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007550static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007551 .open = tracing_open_generic,
7552 .read = tracing_set_trace_read,
7553 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007554 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007555};
7556
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007557static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007558 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02007559 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007560 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02007561 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007562 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007563 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02007564};
7565
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007566static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007567 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02007568 .read = tracing_entries_read,
7569 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007570 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007571 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02007572};
7573
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007574static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007575 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007576 .read = tracing_total_entries_read,
7577 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007578 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007579};
7580
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007581static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007582 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007583 .write = tracing_free_buffer_write,
7584 .release = tracing_free_buffer_release,
7585};
7586
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007587static const struct file_operations tracing_mark_fops = {
John Keeping2972e302021-12-07 14:25:58 +00007588 .open = tracing_mark_open,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007589 .write = tracing_mark_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007590 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007591};
7592
Steven Rostedtfa32e852016-07-06 15:25:08 -04007593static const struct file_operations tracing_mark_raw_fops = {
John Keeping2972e302021-12-07 14:25:58 +00007594 .open = tracing_mark_open,
Steven Rostedtfa32e852016-07-06 15:25:08 -04007595 .write = tracing_mark_raw_write,
Steven Rostedtfa32e852016-07-06 15:25:08 -04007596 .release = tracing_release_generic_tr,
7597};
7598
Zhaolei5079f322009-08-25 16:12:56 +08007599static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08007600 .open = tracing_clock_open,
7601 .read = seq_read,
7602 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007603 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08007604 .write = tracing_clock_write,
7605};
7606
Tom Zanussi2c1ea602018-01-15 20:51:41 -06007607static const struct file_operations trace_time_stamp_mode_fops = {
7608 .open = tracing_time_stamp_mode_open,
7609 .read = seq_read,
7610 .llseek = seq_lseek,
7611 .release = tracing_single_release_tr,
7612};
7613
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007614#ifdef CONFIG_TRACER_SNAPSHOT
7615static const struct file_operations snapshot_fops = {
7616 .open = tracing_snapshot_open,
7617 .read = seq_read,
7618 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05007619 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007620 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007621};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007622
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007623static const struct file_operations snapshot_raw_fops = {
7624 .open = snapshot_raw_open,
7625 .read = tracing_buffers_read,
7626 .release = tracing_buffers_release,
7627 .splice_read = tracing_buffers_splice_read,
7628 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007629};
7630
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007631#endif /* CONFIG_TRACER_SNAPSHOT */
7632
Daniel Bristot de Oliveirabc87cf0a2021-06-22 16:42:23 +02007633/*
7634 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7635 * @filp: The active open file structure
7636	 * @ubuf: The user space provided buffer containing the value to write
7637	 * @cnt: The number of bytes to write
7638 * @ppos: The current "file" position
7639 *
7640 * This function implements the write interface for a struct trace_min_max_param.
7641 * The filp->private_data must point to a trace_min_max_param structure that
7642 * defines where to write the value, the min and the max acceptable values,
7643 * and a lock to protect the write.
7644 */
7645static ssize_t
7646trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7647{
7648 struct trace_min_max_param *param = filp->private_data;
7649 u64 val;
7650 int err;
7651
7652 if (!param)
7653 return -EFAULT;
7654
7655 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7656 if (err)
7657 return err;
7658
7659 if (param->lock)
7660 mutex_lock(param->lock);
7661
7662 if (param->min && val < *param->min)
7663 err = -EINVAL;
7664
7665 if (param->max && val > *param->max)
7666 err = -EINVAL;
7667
7668 if (!err)
7669 *param->val = val;
7670
7671 if (param->lock)
7672 mutex_unlock(param->lock);
7673
7674 if (err)
7675 return err;
7676
7677 return cnt;
7678}
7679
7680/*
7681 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7682 * @filp: The active open file structure
7683 * @ubuf: The userspace provided buffer to read value into
7684 * @cnt: The maximum number of bytes to read
7685 * @ppos: The current "file" position
7686 *
7687 * This function implements the read interface for a struct trace_min_max_param.
7688 * The filp->private_data must point to a trace_min_max_param struct with valid
7689 * data.
7690 */
7691static ssize_t
7692trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7693{
7694 struct trace_min_max_param *param = filp->private_data;
7695 char buf[U64_STR_SIZE];
7696 int len;
7697 u64 val;
7698
7699 if (!param)
7700 return -EFAULT;
7701
7702 val = *param->val;
7703
7704 if (cnt > sizeof(buf))
7705 cnt = sizeof(buf);
7706
7707 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7708
7709 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7710}
7711
7712const struct file_operations trace_min_max_fops = {
7713 .open = tracing_open_generic,
7714 .read = trace_min_max_read,
7715 .write = trace_min_max_write,
7716};
7717
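/*
 * Usage sketch (illustrative, not part of the original source): a u64 knob
 * clamped to [min, max] can be exposed by pointing a tracefs file's
 * i_private at a trace_min_max_param and registering trace_min_max_fops.
 * The field names follow the accesses in trace_min_max_read()/write()
 * above; my_threshold, my_min, my_max, "parent" and the use of
 * trace_create_file() are assumptions made for the example.
 *
 *	static u64 my_threshold = 50;
 *	static u64 my_min = 1, my_max = 100;
 *	static DEFINE_MUTEX(my_lock);
 *
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_threshold,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_threshold", 0644, parent,
 *			  &my_param, &trace_min_max_fops);
 */
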
Tom Zanussi8a062902019-03-31 18:48:15 -05007718#define TRACING_LOG_ERRS_MAX 8
7719#define TRACING_LOG_LOC_MAX 128
7720
7721#define CMD_PREFIX " Command: "
7722
7723struct err_info {
7724 const char **errs; /* ptr to loc-specific array of err strings */
7725 u8 type; /* index into errs -> specific err string */
7726	u8		pos;	/* caret offset in cmd; MAX_FILTER_STR_VAL = 256 fits in a u8 */
7727 u64 ts;
7728};
7729
7730struct tracing_log_err {
7731 struct list_head list;
7732 struct err_info info;
7733 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7734 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7735};
7736
Tom Zanussi8a062902019-03-31 18:48:15 -05007737static DEFINE_MUTEX(tracing_err_log_lock);
7738
YueHaibingff585c52019-06-14 23:32:10 +08007739static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05007740{
7741 struct tracing_log_err *err;
7742
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007743 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007744 err = kzalloc(sizeof(*err), GFP_KERNEL);
7745 if (!err)
7746 err = ERR_PTR(-ENOMEM);
Tom Zanussi67ab5eb2022-01-27 15:44:18 -06007747 else
7748 tr->n_err_log_entries++;
Tom Zanussi8a062902019-03-31 18:48:15 -05007749
7750 return err;
7751 }
7752
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007753 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
Tom Zanussi8a062902019-03-31 18:48:15 -05007754 list_del(&err->list);
7755
7756 return err;
7757}
7758
7759/**
7760 * err_pos - find the position of a string within a command for error careting
7761 * @cmd: The tracing command that caused the error
7762 * @str: The string to position the caret at within @cmd
7763 *
Ingo Molnarf2cc0202021-03-23 18:49:35 +01007764 * Finds the position of the first occurrence of @str within @cmd. The
Tom Zanussi8a062902019-03-31 18:48:15 -05007765 * return value can be passed to tracing_log_err() for caret placement
7766 * within @cmd.
7767 *
Ingo Molnarf2cc0202021-03-23 18:49:35 +01007768 * Returns the index within @cmd of the first occurrence of @str or 0
Tom Zanussi8a062902019-03-31 18:48:15 -05007769 * if @str was not found.
7770 */
7771unsigned int err_pos(char *cmd, const char *str)
7772{
7773 char *found;
7774
7775 if (WARN_ON(!strlen(cmd)))
7776 return 0;
7777
7778 found = strstr(cmd, str);
7779 if (found)
7780 return found - cmd;
7781
7782 return 0;
7783}
7784
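/*
 * Usage sketch (illustrative, not part of the original source): given the
 * command string that failed, err_pos() returns the caret offset to hand
 * to tracing_log_err().  With the hypothetical command below, pos ends up
 * being 10, so the caret is drawn under "bad":
 *
 *	char cmd[] = "hist:keys=bad";
 *	unsigned int pos = err_pos(cmd, "bad");
 */
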
7785/**
7786 * tracing_log_err - write an error to the tracing error log
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007787 * @tr: The associated trace array for the error (NULL for top level array)
Tom Zanussi8a062902019-03-31 18:48:15 -05007788 * @loc: A string describing where the error occurred
7789 * @cmd: The tracing command that caused the error
7790 * @errs: The array of loc-specific static error strings
7791 * @type: The index into errs[], which produces the specific static err string
7792 * @pos: The position the caret should be placed in the cmd
7793 *
7794 * Writes an error into tracing/error_log of the form:
7795 *
7796 * <loc>: error: <text>
7797 * Command: <cmd>
7798 * ^
7799 *
7800 * tracing/error_log is a small log file containing the last
7801 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7802 * unless there has been a tracing error, and the error log can be
7803 * cleared and have its memory freed by writing the empty string in
7804 * truncation mode to it i.e. echo > tracing/error_log.
7805 *
7806 * NOTE: the @errs array along with the @type param are used to
7807 * produce a static error string - this string is not copied and saved
7808 * when the error is logged - only a pointer to it is saved. See
7809 * existing callers for examples of how static strings are typically
7810 * defined for use with tracing_log_err().
7811 */
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007812void tracing_log_err(struct trace_array *tr,
7813 const char *loc, const char *cmd,
Tom Zanussi8a062902019-03-31 18:48:15 -05007814 const char **errs, u8 type, u8 pos)
7815{
7816 struct tracing_log_err *err;
7817
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007818 if (!tr)
7819 tr = &global_trace;
7820
Tom Zanussi8a062902019-03-31 18:48:15 -05007821 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007822 err = get_tracing_log_err(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007823 if (PTR_ERR(err) == -ENOMEM) {
7824 mutex_unlock(&tracing_err_log_lock);
7825 return;
7826 }
7827
7828 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7829	snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7830
7831 err->info.errs = errs;
7832 err->info.type = type;
7833 err->info.pos = pos;
7834 err->info.ts = local_clock();
7835
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007836 list_add_tail(&err->list, &tr->err_log);
Tom Zanussi8a062902019-03-31 18:48:15 -05007837 mutex_unlock(&tracing_err_log_lock);
7838}
7839
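/*
 * Usage sketch (illustrative, not part of the original source): as the
 * comment above notes, callers keep a static array of error strings and
 * log one of them together with the failing command.  The strings, the
 * MY_ERR_* index and the "hist" location below are hypothetical; see the
 * existing in-tree callers for the real patterns.
 *
 *	static const char *my_errs[] = {
 *		"Colons are not allowed in field names",
 *		"Too many fields",
 *	};
 *	enum { MY_ERR_BAD_NAME, MY_ERR_TOO_MANY };
 *
 *	tracing_log_err(tr, "hist", cmd, my_errs, MY_ERR_BAD_NAME,
 *			err_pos(cmd, ":"));
 */
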
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007840static void clear_tracing_err_log(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05007841{
7842 struct tracing_log_err *err, *next;
7843
7844 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007845 list_for_each_entry_safe(err, next, &tr->err_log, list) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007846 list_del(&err->list);
7847 kfree(err);
7848 }
7849
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007850 tr->n_err_log_entries = 0;
Tom Zanussi8a062902019-03-31 18:48:15 -05007851 mutex_unlock(&tracing_err_log_lock);
7852}
7853
7854static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7855{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007856 struct trace_array *tr = m->private;
7857
Tom Zanussi8a062902019-03-31 18:48:15 -05007858 mutex_lock(&tracing_err_log_lock);
7859
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007860 return seq_list_start(&tr->err_log, *pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007861}
7862
7863static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7864{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007865 struct trace_array *tr = m->private;
7866
7867 return seq_list_next(v, &tr->err_log, pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007868}
7869
7870static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7871{
7872 mutex_unlock(&tracing_err_log_lock);
7873}
7874
7875static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7876{
7877 u8 i;
7878
7879 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7880 seq_putc(m, ' ');
7881 for (i = 0; i < pos; i++)
7882 seq_putc(m, ' ');
7883 seq_puts(m, "^\n");
7884}
7885
7886static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7887{
7888 struct tracing_log_err *err = v;
7889
7890 if (err) {
7891 const char *err_text = err->info.errs[err->info.type];
7892 u64 sec = err->info.ts;
7893 u32 nsec;
7894
7895 nsec = do_div(sec, NSEC_PER_SEC);
7896 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7897 err->loc, err_text);
7898 seq_printf(m, "%s", err->cmd);
7899 tracing_err_log_show_pos(m, err->info.pos);
7900 }
7901
7902 return 0;
7903}
7904
7905static const struct seq_operations tracing_err_log_seq_ops = {
7906 .start = tracing_err_log_seq_start,
7907 .next = tracing_err_log_seq_next,
7908 .stop = tracing_err_log_seq_stop,
7909 .show = tracing_err_log_seq_show
7910};
7911
7912static int tracing_err_log_open(struct inode *inode, struct file *file)
7913{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007914 struct trace_array *tr = inode->i_private;
Tom Zanussi8a062902019-03-31 18:48:15 -05007915 int ret = 0;
7916
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007917 ret = tracing_check_open_get_tr(tr);
7918 if (ret)
7919 return ret;
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007920
Tom Zanussi8a062902019-03-31 18:48:15 -05007921 /* If this file was opened for write, then erase contents */
7922 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007923 clear_tracing_err_log(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007924
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007925 if (file->f_mode & FMODE_READ) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007926 ret = seq_open(file, &tracing_err_log_seq_ops);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007927 if (!ret) {
7928 struct seq_file *m = file->private_data;
7929 m->private = tr;
7930 } else {
7931 trace_array_put(tr);
7932 }
7933 }
Tom Zanussi8a062902019-03-31 18:48:15 -05007934 return ret;
7935}
7936
7937static ssize_t tracing_err_log_write(struct file *file,
7938 const char __user *buffer,
7939 size_t count, loff_t *ppos)
7940{
7941 return count;
7942}
7943
Takeshi Misawad122ed62019-06-28 19:56:40 +09007944static int tracing_err_log_release(struct inode *inode, struct file *file)
7945{
7946 struct trace_array *tr = inode->i_private;
7947
7948 trace_array_put(tr);
7949
7950 if (file->f_mode & FMODE_READ)
7951 seq_release(inode, file);
7952
7953 return 0;
7954}
7955
Tom Zanussi8a062902019-03-31 18:48:15 -05007956static const struct file_operations tracing_err_log_fops = {
7957 .open = tracing_err_log_open,
7958 .write = tracing_err_log_write,
7959 .read = seq_read,
7960 .llseek = seq_lseek,
Takeshi Misawad122ed62019-06-28 19:56:40 +09007961 .release = tracing_err_log_release,
Tom Zanussi8a062902019-03-31 18:48:15 -05007962};
7963
Steven Rostedt2cadf912008-12-01 22:20:19 -05007964static int tracing_buffers_open(struct inode *inode, struct file *filp)
7965{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007966 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007967 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007968 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007969
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007970 ret = tracing_check_open_get_tr(tr);
7971 if (ret)
7972 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007973
Zhaoyang Huang0f69dae2020-07-31 08:27:45 +08007974 info = kvzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007975 if (!info) {
7976 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007977 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007978 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007979
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007980 mutex_lock(&trace_types_lock);
7981
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007982 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007983 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05007984 info->iter.trace = tr->current_trace;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007985 info->iter.array_buffer = &tr->array_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007986 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007987 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007988 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007989
7990 filp->private_data = info;
7991
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04007992 tr->trace_ref++;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007993
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007994 mutex_unlock(&trace_types_lock);
7995
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007996 ret = nonseekable_open(inode, filp);
7997 if (ret < 0)
7998 trace_array_put(tr);
7999
8000 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008001}
8002
Al Viro9dd95742017-07-03 00:42:43 -04008003static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008004tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8005{
8006 struct ftrace_buffer_info *info = filp->private_data;
8007 struct trace_iterator *iter = &info->iter;
8008
8009 return trace_poll(iter, filp, poll_table);
8010}
8011
Steven Rostedt2cadf912008-12-01 22:20:19 -05008012static ssize_t
8013tracing_buffers_read(struct file *filp, char __user *ubuf,
8014 size_t count, loff_t *ppos)
8015{
8016 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008017 struct trace_iterator *iter = &info->iter;
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04008018 ssize_t ret = 0;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008019 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008020
Steven Rostedt2dc5d122009-03-04 19:10:05 -05008021 if (!count)
8022 return 0;
8023
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008024#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008025 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8026 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008027#endif
8028
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04008029 if (!info->spare) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008030 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008031 iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04008032 if (IS_ERR(info->spare)) {
8033 ret = PTR_ERR(info->spare);
8034 info->spare = NULL;
8035 } else {
8036 info->spare_cpu = iter->cpu_file;
8037 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04008038 }
Lai Jiangshanddd538f2009-04-02 15:16:59 +08008039 if (!info->spare)
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04008040 return ret;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08008041
Steven Rostedt2cadf912008-12-01 22:20:19 -05008042 /* Do we have previous read data to read? */
8043 if (info->read < PAGE_SIZE)
8044 goto read;
8045
Steven Rostedtb6273442013-02-28 13:44:11 -05008046 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008047 trace_access_lock(iter->cpu_file);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008048 ret = ring_buffer_read_page(iter->array_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05008049 &info->spare,
8050 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008051 iter->cpu_file, 0);
8052 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05008053
8054 if (ret < 0) {
8055 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008056 if ((filp->f_flags & O_NONBLOCK))
8057 return -EAGAIN;
8058
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05008059 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008060 if (ret)
8061 return ret;
8062
Steven Rostedtb6273442013-02-28 13:44:11 -05008063 goto again;
8064 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008065 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05008066 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05008067
Steven Rostedt436fc282011-10-14 10:44:25 -04008068 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05008069 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05008070 size = PAGE_SIZE - info->read;
8071 if (size > count)
8072 size = count;
8073
8074 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008075 if (ret == size)
8076 return -EFAULT;
8077
Steven Rostedt2dc5d122009-03-04 19:10:05 -05008078 size -= ret;
8079
Steven Rostedt2cadf912008-12-01 22:20:19 -05008080 *ppos += size;
8081 info->read += size;
8082
8083 return size;
8084}
8085
8086static int tracing_buffers_release(struct inode *inode, struct file *file)
8087{
8088 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008089 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008090
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05008091 mutex_lock(&trace_types_lock);
8092
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04008093 iter->tr->trace_ref--;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05008094
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04008095 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05008096
Lai Jiangshanddd538f2009-04-02 15:16:59 +08008097 if (info->spare)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008098 ring_buffer_free_read_page(iter->array_buffer->buffer,
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04008099 info->spare_cpu, info->spare);
Zhaoyang Huang0f69dae2020-07-31 08:27:45 +08008100 kvfree(info);
Steven Rostedt2cadf912008-12-01 22:20:19 -05008101
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05008102 mutex_unlock(&trace_types_lock);
8103
Steven Rostedt2cadf912008-12-01 22:20:19 -05008104 return 0;
8105}
8106
8107struct buffer_ref {
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05008108 struct trace_buffer *buffer;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008109 void *page;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04008110 int cpu;
Jann Hornb9872222019-04-04 23:59:25 +02008111 refcount_t refcount;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008112};
8113
Jann Hornb9872222019-04-04 23:59:25 +02008114static void buffer_ref_release(struct buffer_ref *ref)
8115{
8116 if (!refcount_dec_and_test(&ref->refcount))
8117 return;
8118 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8119 kfree(ref);
8120}
8121
Steven Rostedt2cadf912008-12-01 22:20:19 -05008122static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8123 struct pipe_buffer *buf)
8124{
8125 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8126
Jann Hornb9872222019-04-04 23:59:25 +02008127 buffer_ref_release(ref);
Steven Rostedt2cadf912008-12-01 22:20:19 -05008128 buf->private = 0;
8129}
8130
Matthew Wilcox15fab632019-04-05 14:02:10 -07008131static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
Steven Rostedt2cadf912008-12-01 22:20:19 -05008132 struct pipe_buffer *buf)
8133{
8134 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8135
Linus Torvaldse9e1a2e2019-04-26 11:09:55 -07008136 if (refcount_read(&ref->refcount) > INT_MAX/2)
Matthew Wilcox15fab632019-04-05 14:02:10 -07008137 return false;
8138
Jann Hornb9872222019-04-04 23:59:25 +02008139 refcount_inc(&ref->refcount);
Matthew Wilcox15fab632019-04-05 14:02:10 -07008140 return true;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008141}
8142
8143/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08008144static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05008145 .release = buffer_pipe_buf_release,
Steven Rostedt2cadf912008-12-01 22:20:19 -05008146 .get = buffer_pipe_buf_get,
8147};
8148
8149/*
8150 * Callback from splice_to_pipe(), if we need to release some pages
8151 * at the end of the spd in case we errored out while filling the pipe.
8152 */
8153static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8154{
8155 struct buffer_ref *ref =
8156 (struct buffer_ref *)spd->partial[i].private;
8157
Jann Hornb9872222019-04-04 23:59:25 +02008158 buffer_ref_release(ref);
Steven Rostedt2cadf912008-12-01 22:20:19 -05008159 spd->partial[i].private = 0;
8160}
8161
8162static ssize_t
8163tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8164 struct pipe_inode_info *pipe, size_t len,
8165 unsigned int flags)
8166{
8167 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008168 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02008169 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8170 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05008171 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02008172 .pages = pages_def,
8173 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02008174 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05008175 .ops = &buffer_pipe_buf_ops,
8176 .spd_release = buffer_spd_release,
8177 };
8178 struct buffer_ref *ref;
Steven Rostedt (VMware)6b7e6332017-12-22 20:38:57 -05008179 int entries, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01008180 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008181
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008182#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008183 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8184 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008185#endif
8186
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008187 if (*ppos & (PAGE_SIZE - 1))
8188 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08008189
8190 if (len & (PAGE_SIZE - 1)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008191 if (len < PAGE_SIZE)
8192 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08008193 len &= PAGE_MASK;
8194 }
8195
Al Viro1ae22932016-09-17 18:31:46 -04008196 if (splice_grow_spd(pipe, &spd))
8197 return -ENOMEM;
8198
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008199 again:
8200 trace_access_lock(iter->cpu_file);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008201 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04008202
Al Viroa786c062014-04-11 12:01:03 -04008203 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05008204 struct page *page;
8205 int r;
8206
8207 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01008208 if (!ref) {
8209 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008210 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01008211 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05008212
Jann Hornb9872222019-04-04 23:59:25 +02008213 refcount_set(&ref->refcount, 1);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008214 ref->buffer = iter->array_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008215 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04008216 if (IS_ERR(ref->page)) {
8217 ret = PTR_ERR(ref->page);
8218 ref->page = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008219 kfree(ref);
8220 break;
8221 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04008222 ref->cpu = iter->cpu_file;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008223
8224 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008225 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05008226 if (r < 0) {
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04008227 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8228 ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05008229 kfree(ref);
8230 break;
8231 }
8232
Steven Rostedt2cadf912008-12-01 22:20:19 -05008233 page = virt_to_page(ref->page);
8234
8235 spd.pages[i] = page;
8236 spd.partial[i].len = PAGE_SIZE;
8237 spd.partial[i].offset = 0;
8238 spd.partial[i].private = (unsigned long)ref;
8239 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08008240 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04008241
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008242 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05008243 }
8244
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008245 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05008246 spd.nr_pages = i;
8247
8248 /* did we read anything? */
8249 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01008250 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04008251 goto out;
Rabin Vincent07906da2014-11-06 22:26:07 +01008252
Al Viro1ae22932016-09-17 18:31:46 -04008253 ret = -EAGAIN;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008254 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
Al Viro1ae22932016-09-17 18:31:46 -04008255 goto out;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008256
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05008257 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04008258 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04008259 goto out;
Rabin Vincente30f53a2014-11-10 19:46:34 +01008260
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008261 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008262 }
8263
8264 ret = splice_to_pipe(pipe, &spd);
Al Viro1ae22932016-09-17 18:31:46 -04008265out:
Eric Dumazet047fe362012-06-12 15:24:40 +02008266 splice_shrink_spd(&spd);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008267
Steven Rostedt2cadf912008-12-01 22:20:19 -05008268 return ret;
8269}
8270
8271static const struct file_operations tracing_buffers_fops = {
8272 .open = tracing_buffers_open,
8273 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008274 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05008275 .release = tracing_buffers_release,
8276 .splice_read = tracing_buffers_splice_read,
8277 .llseek = no_llseek,
8278};
8279
Steven Rostedtc8d77182009-04-29 18:03:45 -04008280static ssize_t
8281tracing_stats_read(struct file *filp, char __user *ubuf,
8282 size_t count, loff_t *ppos)
8283{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02008284 struct inode *inode = file_inode(filp);
8285 struct trace_array *tr = inode->i_private;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008286 struct array_buffer *trace_buf = &tr->array_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02008287 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008288 struct trace_seq *s;
8289 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07008290 unsigned long long t;
8291 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04008292
Li Zefane4f2d102009-06-15 10:57:28 +08008293 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008294 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01008295 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04008296
8297 trace_seq_init(s);
8298
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008299 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008300 trace_seq_printf(s, "entries: %ld\n", cnt);
8301
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008302 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008303 trace_seq_printf(s, "overrun: %ld\n", cnt);
8304
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008305 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008306 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8307
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008308 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07008309 trace_seq_printf(s, "bytes: %ld\n", cnt);
8310
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09008311 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08008312 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008313 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08008314 usec_rem = do_div(t, USEC_PER_SEC);
8315 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8316 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07008317
Yordan Karadzhov (VMware)f3ef7202021-03-29 16:03:31 +03008318 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08008319 usec_rem = do_div(t, USEC_PER_SEC);
8320 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8321 } else {
8322 /* counter or tsc mode for trace_clock */
8323 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008324 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08008325
8326 trace_seq_printf(s, "now ts: %llu\n",
Yordan Karadzhov (VMware)f3ef7202021-03-29 16:03:31 +03008327 ring_buffer_time_stamp(trace_buf->buffer));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08008328 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07008329
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008330 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07008331 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8332
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008333 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05008334 trace_seq_printf(s, "read events: %ld\n", cnt);
8335
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05008336 count = simple_read_from_buffer(ubuf, count, ppos,
8337 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04008338
8339 kfree(s);
8340
8341 return count;
8342}
8343
8344static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02008345 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04008346 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008347 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02008348 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04008349};
8350
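/*
 * Illustrative example (not from the kernel source): reading the per-CPU
 * "stats" file served by tracing_stats_read() above produces one line per
 * counter, e.g.
 *
 *   # cat /sys/kernel/tracing/per_cpu/cpu0/stats
 *   entries: 1024
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 46080
 *   oldest event ts:  2153.562033
 *   now ts:  2153.573050
 *   dropped events: 0
 *   read events: 0
 *
 * The numbers here are made up; the field names and their order come
 * directly from the trace_seq_printf() calls above.
 */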
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008351#ifdef CONFIG_DYNAMIC_FTRACE
8352
8353static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04008354tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008355 size_t cnt, loff_t *ppos)
8356{
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04008357 ssize_t ret;
8358 char *buf;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008359 int r;
8360
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04008361 /* 256 should be plenty to hold the amount needed */
8362 buf = kmalloc(256, GFP_KERNEL);
8363 if (!buf)
8364 return -ENOMEM;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04008365
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04008366 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8367 ftrace_update_tot_cnt,
8368 ftrace_number_of_pages,
8369 ftrace_number_of_groups);
8370
8371 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8372 kfree(buf);
8373 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008374}
8375
Steven Rostedt5e2336a2009-03-05 21:44:55 -05008376static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02008377 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04008378 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008379 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008380};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008381#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008382
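/*
 * Illustrative example (assumption: this handler backs the
 * "dyn_ftrace_total_info" file): a read returns a single line in the
 * format built by the scnprintf() above, e.g.
 *
 *   # cat /sys/kernel/tracing/dyn_ftrace_total_info
 *   53912 pages:112 groups: 26
 *
 * The counts shown are hypothetical; only the layout (total patched
 * functions, pages, groups) follows from tracing_read_dyn_info().
 */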
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008383#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8384static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04008385ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04008386 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008387 void *data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008388{
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04008389 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008390}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008391
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008392static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04008393ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04008394 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008395 void *data)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008396{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008397 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008398 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008399
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008400 if (mapper)
8401 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008402
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008403 if (count) {
8404
8405 if (*count <= 0)
8406 return;
8407
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008408 (*count)--;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008409 }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008410
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04008411 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008412}
8413
8414static int
8415ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8416 struct ftrace_probe_ops *ops, void *data)
8417{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008418 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008419 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008420
8421 seq_printf(m, "%ps:", (void *)ip);
8422
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01008423 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008424
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008425 if (mapper)
8426 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8427
8428 if (count)
8429 seq_printf(m, ":count=%ld\n", *count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008430 else
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008431 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008432
8433 return 0;
8434}
8435
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008436static int
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04008437ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008438 unsigned long ip, void *init_data, void **data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008439{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008440 struct ftrace_func_mapper *mapper = *data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008441
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008442 if (!mapper) {
8443 mapper = allocate_ftrace_func_mapper();
8444 if (!mapper)
8445 return -ENOMEM;
8446 *data = mapper;
8447 }
8448
8449 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008450}
8451
8452static void
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04008453ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008454 unsigned long ip, void *data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008455{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008456 struct ftrace_func_mapper *mapper = data;
8457
8458 if (!ip) {
8459 if (!mapper)
8460 return;
8461 free_ftrace_func_mapper(mapper, NULL);
8462 return;
8463 }
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008464
8465 ftrace_func_mapper_remove_ip(mapper, ip);
8466}
8467
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008468static struct ftrace_probe_ops snapshot_probe_ops = {
8469 .func = ftrace_snapshot,
8470 .print = ftrace_snapshot_print,
8471};
8472
8473static struct ftrace_probe_ops snapshot_count_probe_ops = {
8474 .func = ftrace_count_snapshot,
8475 .print = ftrace_snapshot_print,
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008476 .init = ftrace_snapshot_init,
8477 .free = ftrace_snapshot_free,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008478};
8479
8480static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04008481ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008482 char *glob, char *cmd, char *param, int enable)
8483{
8484 struct ftrace_probe_ops *ops;
8485 void *count = (void *)-1;
8486 char *number;
8487 int ret;
8488
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -04008489 if (!tr)
8490 return -ENODEV;
8491
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008492 /* hash funcs only work with set_ftrace_filter */
8493 if (!enable)
8494 return -EINVAL;
8495
8496 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8497
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04008498 if (glob[0] == '!')
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04008499 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008500
8501 if (!param)
8502 goto out_reg;
8503
8504 number = strsep(&param, ":");
8505
8506 if (!strlen(number))
8507 goto out_reg;
8508
8509 /*
8510 * We use the callback data field (which is a pointer)
8511 * as our counter.
8512 */
8513 ret = kstrtoul(number, 0, (unsigned long *)&count);
8514 if (ret)
8515 return ret;
8516
8517 out_reg:
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04008518 ret = tracing_alloc_snapshot_instance(tr);
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04008519 if (ret < 0)
8520 goto out;
8521
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04008522 ret = register_ftrace_function_probe(glob, tr, ops, count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008523
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04008524 out:
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008525 return ret < 0 ? ret : 0;
8526}
8527
8528static struct ftrace_func_command ftrace_snapshot_cmd = {
8529 .name = "snapshot",
8530 .func = ftrace_trace_snapshot_callback,
8531};
8532
Tom Zanussi38de93a2013-10-24 08:34:18 -05008533static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008534{
8535 return register_ftrace_command(&ftrace_snapshot_cmd);
8536}
8537#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05008538static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008539#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008540
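/*
 * Usage sketch (derived from ftrace_trace_snapshot_callback() above and the
 * ftrace documentation; the function name is just an example):
 *
 *   # echo 'do_page_fault:snapshot' > /sys/kernel/tracing/set_ftrace_filter
 *   # echo 'do_page_fault:snapshot:5' > /sys/kernel/tracing/set_ftrace_filter
 *   # echo '!do_page_fault:snapshot' > /sys/kernel/tracing/set_ftrace_filter
 *
 * The first form snapshots on every hit of the function, the second limits
 * it to five snapshots (the ":5" is parsed into the count pointer), and the
 * '!' form unregisters the probe, matching the glob[0] == '!' branch above.
 */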
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008541static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008542{
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008543 if (WARN_ON(!tr->dir))
8544 return ERR_PTR(-ENODEV);
8545
8546 /* Top directory uses NULL as the parent */
8547 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8548 return NULL;
8549
8550 /* All sub buffers have a descriptor */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008551 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008552}
8553
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008554static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8555{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008556 struct dentry *d_tracer;
8557
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008558 if (tr->percpu_dir)
8559 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008560
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008561 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05008562 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008563 return NULL;
8564
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008565 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008566
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008567 MEM_FAIL(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008568 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008569
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008570 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008571}
8572
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008573static struct dentry *
8574trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8575 void *data, long cpu, const struct file_operations *fops)
8576{
8577 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8578
8579 if (ret) /* See tracing_get_cpu() */
David Howells7682c912015-03-17 22:26:16 +00008580 d_inode(ret)->i_cdev = (void *)(cpu + 1);
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008581 return ret;
8582}
8583
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008584static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008585tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008586{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008587 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008588 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04008589 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008590
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09008591 if (!d_percpu)
8592 return;
8593
Steven Rostedtdd49a382010-10-20 21:51:26 -04008594 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008595 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008596 if (!d_cpu) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07008597 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008598 return;
8599 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008600
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008601 /* per cpu trace_pipe */
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04008602 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02008603 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008604
8605 /* per cpu trace */
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04008606 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008607 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04008608
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04008609 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02008610 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008611
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04008612 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02008613 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08008614
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04008615 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02008616 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05008617
8618#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04008619 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008620 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008621
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04008622 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02008623 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05008624#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008625}
8626
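/*
 * Resulting layout (illustrative): for each tracing CPU the function above
 * creates a directory such as
 *
 *   /sys/kernel/tracing/per_cpu/cpu0/trace_pipe
 *   /sys/kernel/tracing/per_cpu/cpu0/trace
 *   /sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw
 *   /sys/kernel/tracing/per_cpu/cpu0/stats
 *   /sys/kernel/tracing/per_cpu/cpu0/buffer_size_kb
 *   /sys/kernel/tracing/per_cpu/cpu0/snapshot       (CONFIG_TRACER_SNAPSHOT)
 *   /sys/kernel/tracing/per_cpu/cpu0/snapshot_raw   (CONFIG_TRACER_SNAPSHOT)
 *
 * mirroring the trace_create_cpu_file() calls just above; instances get the
 * same tree under /sys/kernel/tracing/instances/<name>/per_cpu/.
 */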
Steven Rostedt60a11772008-05-12 21:20:44 +02008627#ifdef CONFIG_FTRACE_SELFTEST
8628/* Let selftest have access to static functions in this file */
8629#include "trace_selftest.c"
8630#endif
8631
Steven Rostedt577b7852009-02-26 23:43:05 -05008632static ssize_t
8633trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8634 loff_t *ppos)
8635{
8636 struct trace_option_dentry *topt = filp->private_data;
8637 char *buf;
8638
8639 if (topt->flags->val & topt->opt->bit)
8640 buf = "1\n";
8641 else
8642 buf = "0\n";
8643
8644 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8645}
8646
8647static ssize_t
8648trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8649 loff_t *ppos)
8650{
8651 struct trace_option_dentry *topt = filp->private_data;
8652 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05008653 int ret;
8654
Peter Huewe22fe9b52011-06-07 21:58:27 +02008655 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8656 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05008657 return ret;
8658
Li Zefan8d18eaa2009-12-08 11:17:06 +08008659 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05008660 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08008661
8662 if (!!(topt->flags->val & topt->opt->bit) != val) {
8663 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05008664 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05008665 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08008666 mutex_unlock(&trace_types_lock);
8667 if (ret)
8668 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05008669 }
8670
8671 *ppos += cnt;
8672
8673 return cnt;
8674}
8675
8676
8677static const struct file_operations trace_options_fops = {
8678 .open = tracing_open_generic,
8679 .read = trace_options_read,
8680 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008681 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05008682};
8683
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008684/*
8685 * In order to pass in both the trace_array descriptor as well as the index
8686 * to the flag that the trace option file represents, the trace_array
8687 * has a character array of trace_flags_index[], which holds the index
8688 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8689 * The address of this character array is passed to the flag option file
8690 * read/write callbacks.
8691 *
8692 * In order to extract both the index and the trace_array descriptor,
8693 * get_tr_index() uses the following algorithm.
8694 *
8695 * idx = *ptr;
8696 *
 8697 * The pointer passed in is the address of one entry in that array,
 8698 * so dereferencing it yields the index itself (remember, index[1] == 1).
 8699 *
 8700 * Then, to get the trace_array descriptor, subtract that index from
 8701 * the pointer to land on the start of the array:
8702 *
8703 * ptr - idx == &index[0]
8704 *
8705 * Then a simple container_of() from that pointer gets us to the
8706 * trace_array descriptor.
8707 */
8708static void get_tr_index(void *data, struct trace_array **ptr,
8709 unsigned int *pindex)
8710{
8711 *pindex = *(unsigned char *)data;
8712
8713 *ptr = container_of(data - *pindex, struct trace_array,
8714 trace_flags_index);
8715}
8716
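/*
 * Worked example (illustrative only): if the file was created with
 * &tr->trace_flags_index[3], then *pindex reads back 3 because the array
 * was filled with index[i] = i, and data - 3 is &tr->trace_flags_index[0];
 * container_of() on that address recovers the enclosing trace_array, just
 * as the comment above describes.
 */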
Steven Rostedta8259072009-02-26 22:19:12 -05008717static ssize_t
8718trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8719 loff_t *ppos)
8720{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008721 void *tr_index = filp->private_data;
8722 struct trace_array *tr;
8723 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05008724 char *buf;
8725
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008726 get_tr_index(tr_index, &tr, &index);
8727
8728 if (tr->trace_flags & (1 << index))
Steven Rostedta8259072009-02-26 22:19:12 -05008729 buf = "1\n";
8730 else
8731 buf = "0\n";
8732
8733 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8734}
8735
8736static ssize_t
8737trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8738 loff_t *ppos)
8739{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008740 void *tr_index = filp->private_data;
8741 struct trace_array *tr;
8742 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05008743 unsigned long val;
8744 int ret;
8745
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008746 get_tr_index(tr_index, &tr, &index);
8747
Peter Huewe22fe9b52011-06-07 21:58:27 +02008748 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8749 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05008750 return ret;
8751
Zhaoleif2d84b62009-08-07 18:55:48 +08008752 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05008753 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008754
Prateek Sood3a53acf2019-12-10 09:15:16 +00008755 mutex_lock(&event_mutex);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008756 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008757 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008758 mutex_unlock(&trace_types_lock);
Prateek Sood3a53acf2019-12-10 09:15:16 +00008759 mutex_unlock(&event_mutex);
Steven Rostedta8259072009-02-26 22:19:12 -05008760
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04008761 if (ret < 0)
8762 return ret;
8763
Steven Rostedta8259072009-02-26 22:19:12 -05008764 *ppos += cnt;
8765
8766 return cnt;
8767}
8768
Steven Rostedta8259072009-02-26 22:19:12 -05008769static const struct file_operations trace_options_core_fops = {
8770 .open = tracing_open_generic,
8771 .read = trace_options_core_read,
8772 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008773 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05008774};
8775
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008776struct dentry *trace_create_file(const char *name,
Al Virof4ae40a62011-07-24 04:33:43 -04008777 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008778 struct dentry *parent,
8779 void *data,
8780 const struct file_operations *fops)
8781{
8782 struct dentry *ret;
8783
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008784 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008785 if (!ret)
Joe Perchesa395d6a2016-03-22 14:28:09 -07008786 pr_warn("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008787
8788 return ret;
8789}
8790
8791
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008792static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05008793{
8794 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05008795
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008796 if (tr->options)
8797 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05008798
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008799 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05008800 if (IS_ERR(d_tracer))
Steven Rostedta8259072009-02-26 22:19:12 -05008801 return NULL;
8802
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008803 tr->options = tracefs_create_dir("options", d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008804 if (!tr->options) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07008805 pr_warn("Could not create tracefs directory 'options'\n");
Steven Rostedta8259072009-02-26 22:19:12 -05008806 return NULL;
8807 }
8808
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008809 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05008810}
8811
Steven Rostedt577b7852009-02-26 23:43:05 -05008812static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008813create_trace_option_file(struct trace_array *tr,
8814 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05008815 struct tracer_flags *flags,
8816 struct tracer_opt *opt)
8817{
8818 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05008819
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008820 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05008821 if (!t_options)
8822 return;
8823
8824 topt->flags = flags;
8825 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008826 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05008827
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04008828 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
8829 t_options, topt, &trace_options_fops);
Steven Rostedt577b7852009-02-26 23:43:05 -05008830
Steven Rostedt577b7852009-02-26 23:43:05 -05008831}
8832
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008833static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008834create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05008835{
8836 struct trace_option_dentry *topts;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008837 struct trace_options *tr_topts;
Steven Rostedt577b7852009-02-26 23:43:05 -05008838 struct tracer_flags *flags;
8839 struct tracer_opt *opts;
8840 int cnt;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008841 int i;
Steven Rostedt577b7852009-02-26 23:43:05 -05008842
8843 if (!tracer)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008844 return;
Steven Rostedt577b7852009-02-26 23:43:05 -05008845
8846 flags = tracer->flags;
8847
8848 if (!flags || !flags->opts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008849 return;
8850
8851 /*
8852 * If this is an instance, only create flags for tracers
8853 * the instance may have.
8854 */
8855 if (!trace_ok_for_array(tracer, tr))
8856 return;
8857
8858 for (i = 0; i < tr->nr_topts; i++) {
Chunyu Hud39cdd22016-03-08 21:37:01 +08008859	/* Make sure there are no duplicate flags. */
8860 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008861 return;
8862 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008863
8864 opts = flags->opts;
8865
8866 for (cnt = 0; opts[cnt].name; cnt++)
8867 ;
8868
Steven Rostedt0cfe8242009-02-27 10:51:10 -05008869 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05008870 if (!topts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008871 return;
8872
8873 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8874 GFP_KERNEL);
8875 if (!tr_topts) {
8876 kfree(topts);
8877 return;
8878 }
8879
8880 tr->topts = tr_topts;
8881 tr->topts[tr->nr_topts].tracer = tracer;
8882 tr->topts[tr->nr_topts].topts = topts;
8883 tr->nr_topts++;
Steven Rostedt577b7852009-02-26 23:43:05 -05008884
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008885 for (cnt = 0; opts[cnt].name; cnt++) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008886 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05008887 &opts[cnt]);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008888 MEM_FAIL(topts[cnt].entry == NULL,
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008889 "Failed to create trace option: %s",
8890 opts[cnt].name);
8891 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008892}
8893
Steven Rostedta8259072009-02-26 22:19:12 -05008894static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008895create_trace_option_core_file(struct trace_array *tr,
8896 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05008897{
8898 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05008899
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008900 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008901 if (!t_options)
8902 return NULL;
8903
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04008904 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008905 (void *)&tr->trace_flags_index[index],
8906 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05008907}
8908
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008909static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05008910{
8911 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008912 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05008913 int i;
8914
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008915 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008916 if (!t_options)
8917 return;
8918
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008919 for (i = 0; trace_options[i]; i++) {
8920 if (top_level ||
8921 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8922 create_trace_option_core_file(tr, trace_options[i], i);
8923 }
Steven Rostedta8259072009-02-26 22:19:12 -05008924}
8925
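/*
 * Illustrative usage: create_trace_options_dir() above populates
 * /sys/kernel/tracing/options/ (and instances/<name>/options/) with one
 * boolean file per entry of trace_options[], e.g.
 *
 *   # echo 1 > /sys/kernel/tracing/options/sym-offset
 *   # echo 0 > /sys/kernel/tracing/options/overwrite
 *
 * "sym-offset" and "overwrite" are merely examples of such flag names; the
 * authoritative list is the trace_options[] array itself.
 */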
Steven Rostedt499e5472012-02-22 15:50:28 -05008926static ssize_t
8927rb_simple_read(struct file *filp, char __user *ubuf,
8928 size_t cnt, loff_t *ppos)
8929{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04008930 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05008931 char buf[64];
8932 int r;
8933
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008934 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05008935 r = sprintf(buf, "%d\n", r);
8936
8937 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8938}
8939
8940static ssize_t
8941rb_simple_write(struct file *filp, const char __user *ubuf,
8942 size_t cnt, loff_t *ppos)
8943{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04008944 struct trace_array *tr = filp->private_data;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05008945 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05008946 unsigned long val;
8947 int ret;
8948
8949 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8950 if (ret)
8951 return ret;
8952
8953 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008954 mutex_lock(&trace_types_lock);
Steven Rostedt (VMware)f1436412018-08-01 15:40:57 -04008955 if (!!val == tracer_tracing_is_on(tr)) {
8956 val = 0; /* do nothing */
8957 } else if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008958 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008959 if (tr->current_trace->start)
8960 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008961 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008962 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008963 if (tr->current_trace->stop)
8964 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008965 }
8966 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05008967 }
8968
8969 (*ppos)++;
8970
8971 return cnt;
8972}
8973
8974static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04008975 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05008976 .read = rb_simple_read,
8977 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04008978 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05008979 .llseek = default_llseek,
8980};
8981
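/*
 * Illustrative usage (not part of this file): rb_simple_fops above backs
 * the "tracing_on" file created in init_tracer_tracefs() below, so the
 * write handler is exercised by
 *
 *   # echo 0 > /sys/kernel/tracing/tracing_on    (tracer_tracing_off())
 *   # echo 1 > /sys/kernel/tracing/tracing_on    (tracer_tracing_on())
 *
 * Writing the value that is already in effect takes the "do nothing"
 * branch; only the ring buffer is started or stopped, the current tracer
 * itself stays registered.
 */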
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05008982static ssize_t
8983buffer_percent_read(struct file *filp, char __user *ubuf,
8984 size_t cnt, loff_t *ppos)
8985{
8986 struct trace_array *tr = filp->private_data;
8987 char buf[64];
8988 int r;
8989
8990 r = tr->buffer_percent;
8991 r = sprintf(buf, "%d\n", r);
8992
8993 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8994}
8995
8996static ssize_t
8997buffer_percent_write(struct file *filp, const char __user *ubuf,
8998 size_t cnt, loff_t *ppos)
8999{
9000 struct trace_array *tr = filp->private_data;
9001 unsigned long val;
9002 int ret;
9003
9004 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9005 if (ret)
9006 return ret;
9007
9008 if (val > 100)
9009 return -EINVAL;
9010
9011 if (!val)
9012 val = 1;
9013
9014 tr->buffer_percent = val;
9015
9016 (*ppos)++;
9017
9018 return cnt;
9019}
9020
9021static const struct file_operations buffer_percent_fops = {
9022 .open = tracing_open_generic_tr,
9023 .read = buffer_percent_read,
9024 .write = buffer_percent_write,
9025 .release = tracing_release_generic_tr,
9026 .llseek = default_llseek,
9027};
9028
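/*
 * Illustrative usage: the "buffer_percent" file accepts 0-100 (a write of
 * 0 is clamped to 1 by the handler above), e.g.
 *
 *   # echo 50 > /sys/kernel/tracing/buffer_percent
 *
 * and sets how full the ring buffer must be before blocked readers are
 * woken. The default of 50 is assigned in init_tracer_tracefs() below.
 */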
YueHaibingff585c52019-06-14 23:32:10 +08009029static struct dentry *trace_instance_dir;
Steven Rostedt277ba042012-08-03 16:10:49 -04009030
9031static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009032init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
Steven Rostedt277ba042012-08-03 16:10:49 -04009033
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05009034static int
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009035allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04009036{
9037 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009038
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009039 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009040
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05009041 buf->tr = tr;
9042
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05009043 buf->buffer = ring_buffer_alloc(size, rb_flags);
9044 if (!buf->buffer)
9045 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009046
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05009047 buf->data = alloc_percpu(struct trace_array_cpu);
9048 if (!buf->data) {
9049 ring_buffer_free(buf->buffer);
Steven Rostedt (VMware)4397f042017-12-26 20:07:34 -05009050 buf->buffer = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05009051 return -ENOMEM;
9052 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009053
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009054 /* Allocate the first page for all buffers */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009055 set_buffer_entries(&tr->array_buffer,
9056 ring_buffer_size(tr->array_buffer.buffer, 0));
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009057
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05009058 return 0;
9059}
9060
9061static int allocate_trace_buffers(struct trace_array *tr, int size)
9062{
9063 int ret;
9064
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009065 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05009066 if (ret)
9067 return ret;
9068
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009069#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05009070 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9071 allocate_snapshot ? size : 1);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05009072 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009073 ring_buffer_free(tr->array_buffer.buffer);
9074 tr->array_buffer.buffer = NULL;
9075 free_percpu(tr->array_buffer.data);
9076 tr->array_buffer.data = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05009077 return -ENOMEM;
9078 }
9079 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009080
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05009081 /*
9082 * Only the top level trace array gets its snapshot allocated
9083 * from the kernel command line.
9084 */
9085 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009086#endif
Steven Rostedt (VMware)11f5efc2020-05-06 10:36:18 -04009087
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009088 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009089}
9090
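/*
 * Note (derived from the code above): the max/snapshot buffer starts out at
 * a single page unless snapshot allocation was requested on the kernel
 * command line (the "alloc_snapshot" parameter is assumed here); it is only
 * grown to the full size when a snapshot is actually taken, which keeps
 * unused instances cheap.
 */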
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009091static void free_trace_buffer(struct array_buffer *buf)
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04009092{
9093 if (buf->buffer) {
9094 ring_buffer_free(buf->buffer);
9095 buf->buffer = NULL;
9096 free_percpu(buf->data);
9097 buf->data = NULL;
9098 }
9099}
9100
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04009101static void free_trace_buffers(struct trace_array *tr)
9102{
9103 if (!tr)
9104 return;
9105
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009106 free_trace_buffer(&tr->array_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04009107
9108#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04009109 free_trace_buffer(&tr->max_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04009110#endif
9111}
9112
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04009113static void init_trace_flags_index(struct trace_array *tr)
9114{
9115 int i;
9116
9117 /* Used by the trace options files */
9118 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9119 tr->trace_flags_index[i] = i;
9120}
9121
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04009122static void __update_tracer_options(struct trace_array *tr)
9123{
9124 struct tracer *t;
9125
9126 for (t = trace_types; t; t = t->next)
9127 add_tracer_options(tr, t);
9128}
9129
9130static void update_tracer_options(struct trace_array *tr)
9131{
9132 mutex_lock(&trace_types_lock);
9133 __update_tracer_options(tr);
9134 mutex_unlock(&trace_types_lock);
9135}
9136
Tom Zanussi89c95fc2020-01-29 12:59:21 -06009137/* Must have trace_types_lock held */
9138struct trace_array *trace_array_find(const char *instance)
9139{
9140 struct trace_array *tr, *found = NULL;
9141
9142 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9143 if (tr->name && strcmp(tr->name, instance) == 0) {
9144 found = tr;
9145 break;
9146 }
9147 }
9148
9149 return found;
9150}
9151
9152struct trace_array *trace_array_find_get(const char *instance)
9153{
9154 struct trace_array *tr;
9155
9156 mutex_lock(&trace_types_lock);
9157 tr = trace_array_find(instance);
9158 if (tr)
9159 tr->ref++;
9160 mutex_unlock(&trace_types_lock);
9161
9162 return tr;
9163}
9164
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09009165static int trace_array_create_dir(struct trace_array *tr)
9166{
9167 int ret;
9168
9169 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9170 if (!tr->dir)
9171 return -EINVAL;
9172
9173 ret = event_trace_add_tracer(tr->dir, tr);
Kamal Agrawalff41c282021-07-30 18:53:06 +05309174 if (ret) {
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09009175 tracefs_remove(tr->dir);
Kamal Agrawalff41c282021-07-30 18:53:06 +05309176 return ret;
9177 }
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09009178
9179 init_tracer_tracefs(tr, tr->dir);
9180 __update_tracer_options(tr);
9181
9182 return ret;
9183}
9184
Divya Indi28879782019-11-20 11:08:38 -08009185static struct trace_array *trace_array_create(const char *name)
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009186{
Steven Rostedt277ba042012-08-03 16:10:49 -04009187 struct trace_array *tr;
9188 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04009189
Steven Rostedt277ba042012-08-03 16:10:49 -04009190 ret = -ENOMEM;
9191 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9192 if (!tr)
Divya Indi28879782019-11-20 11:08:38 -08009193 return ERR_PTR(ret);
Steven Rostedt277ba042012-08-03 16:10:49 -04009194
9195 tr->name = kstrdup(name, GFP_KERNEL);
9196 if (!tr->name)
9197 goto out_free_tr;
9198
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009199 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9200 goto out_free_tr;
9201
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04009202 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009203
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009204 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9205
Steven Rostedt277ba042012-08-03 16:10:49 -04009206 raw_spin_lock_init(&tr->start_lock);
9207
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05009208 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9209
Steven Rostedt277ba042012-08-03 16:10:49 -04009210 tr->current_trace = &nop_trace;
9211
9212 INIT_LIST_HEAD(&tr->systems);
9213 INIT_LIST_HEAD(&tr->events);
Tom Zanussi067fe032018-01-15 20:51:56 -06009214 INIT_LIST_HEAD(&tr->hist_vars);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04009215 INIT_LIST_HEAD(&tr->err_log);
Steven Rostedt277ba042012-08-03 16:10:49 -04009216
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009217 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04009218 goto out_free_tr;
9219
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09009220 if (ftrace_allocate_ftrace_ops(tr) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04009221 goto out_free_tr;
9222
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04009223 ftrace_init_trace_array(tr);
9224
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04009225 init_trace_flags_index(tr);
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09009226
9227 if (trace_instance_dir) {
9228 ret = trace_array_create_dir(tr);
9229 if (ret)
9230 goto out_free_tr;
Masami Hiramatsu720dee52020-09-25 01:40:08 +09009231 } else
9232 __trace_early_add_events(tr);
Steven Rostedt277ba042012-08-03 16:10:49 -04009233
9234 list_add(&tr->list, &ftrace_trace_arrays);
9235
Divya Indi28879782019-11-20 11:08:38 -08009236 tr->ref++;
9237
Divya Indif45d1222019-03-20 11:28:51 -07009238 return tr;
Steven Rostedt277ba042012-08-03 16:10:49 -04009239
9240 out_free_tr:
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09009241 ftrace_free_ftrace_ops(tr);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04009242 free_trace_buffers(tr);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009243 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04009244 kfree(tr->name);
9245 kfree(tr);
9246
Divya Indif45d1222019-03-20 11:28:51 -07009247 return ERR_PTR(ret);
9248}
Steven Rostedt277ba042012-08-03 16:10:49 -04009249
Divya Indif45d1222019-03-20 11:28:51 -07009250static int instance_mkdir(const char *name)
9251{
Divya Indi28879782019-11-20 11:08:38 -08009252 struct trace_array *tr;
9253 int ret;
9254
9255 mutex_lock(&event_mutex);
9256 mutex_lock(&trace_types_lock);
9257
9258 ret = -EEXIST;
Tom Zanussi89c95fc2020-01-29 12:59:21 -06009259 if (trace_array_find(name))
9260 goto out_unlock;
Divya Indi28879782019-11-20 11:08:38 -08009261
9262 tr = trace_array_create(name);
9263
9264 ret = PTR_ERR_OR_ZERO(tr);
9265
9266out_unlock:
9267 mutex_unlock(&trace_types_lock);
9268 mutex_unlock(&event_mutex);
9269 return ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04009270}
9271
Divya Indi28879782019-11-20 11:08:38 -08009272/**
9273 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9274 * @name: The name of the trace array to be looked up/created.
9275 *
 9276 * Returns a pointer to the trace array with the given name, or
 9277 * NULL if it cannot be created.
9278 *
9279 * NOTE: This function increments the reference counter associated with the
9280 * trace array returned. This makes sure it cannot be freed while in use.
9281 * Use trace_array_put() once the trace array is no longer needed.
Steven Rostedt (VMware)28394da2020-01-24 20:47:46 -05009282 * If the trace_array is to be freed, trace_array_destroy() needs to
9283 * be called after the trace_array_put(), or simply let user space delete
9284 * it from the tracefs instances directory. But until the
 9285 * trace_array_put() is called, user space cannot delete it.
Divya Indi28879782019-11-20 11:08:38 -08009286 *
9287 */
9288struct trace_array *trace_array_get_by_name(const char *name)
9289{
9290 struct trace_array *tr;
9291
9292 mutex_lock(&event_mutex);
9293 mutex_lock(&trace_types_lock);
9294
9295 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9296 if (tr->name && strcmp(tr->name, name) == 0)
9297 goto out_unlock;
9298 }
9299
9300 tr = trace_array_create(name);
9301
9302 if (IS_ERR(tr))
9303 tr = NULL;
9304out_unlock:
9305 if (tr)
9306 tr->ref++;
9307
9308 mutex_unlock(&trace_types_lock);
9309 mutex_unlock(&event_mutex);
9310 return tr;
9311}
9312EXPORT_SYMBOL_GPL(trace_array_get_by_name);
9313
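/*
 * Minimal usage sketch for a module (illustrative; it follows the pattern
 * of samples/ftrace/sample-trace-array.c rather than anything defined in
 * this file, and "my_instance" is an arbitrary name):
 *
 *   struct trace_array *tr;
 *
 *   tr = trace_array_get_by_name("my_instance");
 *   if (!tr)
 *           return -ENOMEM;
 *   trace_array_init_printk(tr);
 *   trace_array_printk(tr, _THIS_IP_, "hello from my_instance\n");
 *   ...
 *   trace_array_put(tr);
 *   trace_array_destroy(tr);     // only if the module wants the instance
 *                                // removed and no one else holds it
 */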
Divya Indif45d1222019-03-20 11:28:51 -07009314static int __remove_instance(struct trace_array *tr)
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009315{
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04009316 int i;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009317
Divya Indi28879782019-11-20 11:08:38 -08009318 /* Reference counter for a newly created trace array = 1. */
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04009319 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
Divya Indif45d1222019-03-20 11:28:51 -07009320 return -EBUSY;
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05009321
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009322 list_del(&tr->list);
9323
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04009324 /* Disable all the flags that were enabled coming in */
9325 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9326 if ((1 << i) & ZEROED_TRACE_FLAGS)
9327 set_tracer_flag(tr, 1 << i, 0);
9328 }
9329
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05009330 tracing_set_nop(tr);
Naveen N. Raoa0e63692017-05-16 23:21:26 +05309331 clear_ftrace_function_probes(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009332 event_trace_del_tracer(tr);
Namhyung Kimd879d0b2017-04-17 11:44:27 +09009333 ftrace_clear_pids(tr);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05009334 ftrace_destroy_function_files(tr);
Al Viroa3d1e7e2019-11-18 09:43:10 -05009335 tracefs_remove(tr->dir);
Yordan Karadzhov (VMware)20344c52021-04-15 21:18:51 +03009336 free_percpu(tr->last_func_repeats);
Steven Rostedt (Red Hat)a9fcaaa2014-06-06 23:17:28 -04009337 free_trace_buffers(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009338
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04009339 for (i = 0; i < tr->nr_topts; i++) {
9340 kfree(tr->topts[i].topts);
9341 }
9342 kfree(tr->topts);
9343
Chunyu Hudb9108e02017-07-20 18:36:09 +08009344 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009345 kfree(tr->name);
9346 kfree(tr);
9347
Divya Indif45d1222019-03-20 11:28:51 -07009348 return 0;
9349}
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009350
Divya Indie585e642019-08-14 10:55:24 -07009351int trace_array_destroy(struct trace_array *this_tr)
Divya Indif45d1222019-03-20 11:28:51 -07009352{
Divya Indie585e642019-08-14 10:55:24 -07009353 struct trace_array *tr;
Divya Indif45d1222019-03-20 11:28:51 -07009354 int ret;
9355
Divya Indie585e642019-08-14 10:55:24 -07009356 if (!this_tr)
Divya Indif45d1222019-03-20 11:28:51 -07009357 return -EINVAL;
9358
9359 mutex_lock(&event_mutex);
9360 mutex_lock(&trace_types_lock);
9361
Divya Indie585e642019-08-14 10:55:24 -07009362 ret = -ENODEV;
9363
9364 /* Making sure trace array exists before destroying it. */
9365 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9366 if (tr == this_tr) {
9367 ret = __remove_instance(tr);
9368 break;
9369 }
9370 }
Divya Indif45d1222019-03-20 11:28:51 -07009371
9372 mutex_unlock(&trace_types_lock);
9373 mutex_unlock(&event_mutex);
9374
9375 return ret;
9376}
9377EXPORT_SYMBOL_GPL(trace_array_destroy);
9378
9379static int instance_rmdir(const char *name)
9380{
9381 struct trace_array *tr;
9382 int ret;
9383
9384 mutex_lock(&event_mutex);
9385 mutex_lock(&trace_types_lock);
9386
9387 ret = -ENODEV;
Tom Zanussi89c95fc2020-01-29 12:59:21 -06009388 tr = trace_array_find(name);
9389 if (tr)
9390 ret = __remove_instance(tr);
Divya Indif45d1222019-03-20 11:28:51 -07009391
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009392 mutex_unlock(&trace_types_lock);
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04009393 mutex_unlock(&event_mutex);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009394
9395 return ret;
9396}
9397
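/*
 * From user space the callbacks above are reached through the "instances"
 * directory (illustrative commands; "foo" is an arbitrary name):
 *
 *   # mkdir /sys/kernel/tracing/instances/foo     -> instance_mkdir("foo")
 *   # rmdir /sys/kernel/tracing/instances/foo     -> instance_rmdir("foo")
 *
 * The rmdir fails with -EBUSY while the instance is still referenced or has
 * open trace files, per the checks in __remove_instance().
 */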
Steven Rostedt277ba042012-08-03 16:10:49 -04009398static __init void create_trace_instances(struct dentry *d_tracer)
9399{
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09009400 struct trace_array *tr;
9401
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05009402 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9403 instance_mkdir,
9404 instance_rmdir);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05009405 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
Steven Rostedt277ba042012-08-03 16:10:49 -04009406 return;
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09009407
9408 mutex_lock(&event_mutex);
9409 mutex_lock(&trace_types_lock);
9410
9411 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9412 if (!tr->name)
9413 continue;
9414 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9415 "Failed to create instance directory\n"))
9416 break;
9417 }
9418
9419 mutex_unlock(&trace_types_lock);
9420 mutex_unlock(&event_mutex);
Steven Rostedt277ba042012-08-03 16:10:49 -04009421}
9422
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009423static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009424init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009425{
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04009426 struct trace_event_file *file;
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05009427 int cpu;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009428
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009429 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05009430 tr, &show_traces_fops);
9431
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009432 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05009433 tr, &set_tracer_fops);
9434
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009435 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009436 tr, &tracing_cpumask_fops);
9437
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009438 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009439 tr, &tracing_iter_fops);
9440
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009441 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02009442 tr, &tracing_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009443
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009444 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
Oleg Nesterov15544202013-07-23 17:25:57 +02009445 tr, &tracing_pipe_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009446
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009447 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02009448 tr, &tracing_entries_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009449
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009450 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009451 tr, &tracing_total_entries_fops);
9452
Wang YanQing238ae932013-05-26 16:52:01 +08009453 trace_create_file("free_buffer", 0200, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009454 tr, &tracing_free_buffer_fops);
9455
9456 trace_create_file("trace_marker", 0220, d_tracer,
9457 tr, &tracing_mark_fops);
9458
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04009459 file = __find_event_file(tr, "ftrace", "print");
9460 if (file && file->dir)
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009461 trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
9462 file, &event_trigger_fops);
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04009463 tr->trace_marker_file = file;
9464
Steven Rostedtfa32e852016-07-06 15:25:08 -04009465 trace_create_file("trace_marker_raw", 0220, d_tracer,
9466 tr, &tracing_mark_raw_fops);
9467
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009468 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009469 &trace_clock_fops);
9470
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009471 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02009472 tr, &rb_simple_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05009473
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009474 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
Tom Zanussi2c1ea602018-01-15 20:51:41 -06009475 &trace_time_stamp_mode_fops);
9476
Steven Rostedt (VMware)a7b1d742018-11-29 22:36:47 -05009477 tr->buffer_percent = 50;
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05009478
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009479 trace_create_file("buffer_percent", TRACE_MODE_READ, d_tracer,
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05009480 tr, &buffer_percent_fops);
9481
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04009482 create_trace_options_dir(tr);
9483
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02009484 trace_create_maxlat_file(tr, d_tracer);
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05009485
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05009486 if (ftrace_create_function_files(tr, d_tracer))
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05009487 MEM_FAIL(1, "Could not allocate function filter files");
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05009488
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05009489#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009490 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02009491 tr, &snapshot_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05009492#endif
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05009493
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009494 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
Tom Zanussi8a062902019-03-31 18:48:15 -05009495 tr, &tracing_err_log_fops);
9496
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05009497 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009498 tracing_init_tracefs_percpu(tr, cpu);
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05009499
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04009500 ftrace_init_tracefs(tr, d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009501}
9502
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13009503static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009504{
9505 struct vfsmount *mnt;
9506 struct file_system_type *type;
9507
9508 /*
9509 * To maintain backward compatibility for tools that mount
9510 * debugfs to get to the tracing facility, tracefs is automatically
9511 * mounted to the debugfs/tracing directory.
9512 */
9513 type = get_fs_type("tracefs");
9514 if (!type)
9515 return NULL;
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13009516 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009517 put_filesystem(type);
9518 if (IS_ERR(mnt))
9519 return NULL;
9520 mntget(mnt);
9521
9522 return mnt;
9523}
9524
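/*
 * Illustrative effect (not code from this file): with the automount set up
 * in tracing_init_dentry() below, a legacy "cd /sys/kernel/debug/tracing"
 * transparently mounts tracefs at that point, so tools that still use the
 * old debugfs path keep working alongside the canonical
 * /sys/kernel/tracing mount.
 */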
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009525/**
9526 * tracing_init_dentry - initialize top level trace array
9527 *
 9528 * This is called when creating files or directories in the tracing
 9529 * directory. It is called via fs_initcall() by any of the boot up code
 9530 * and sets up the top level tracing directory, returning 0 on success.
9531 */
int tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return 0;

	if (WARN_ON(!tracefs_initialized()))
		return -ENODEV;

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);

	return 0;
}

extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

static struct workqueue_struct *eval_map_wq __initdata;
static struct work_struct eval_map_work __initdata;

static void __init eval_map_work_func(struct work_struct *work)
{
	int len;

	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}

static int __init trace_eval_init(void)
{
	INIT_WORK(&eval_map_work, eval_map_work_func);

	eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
	if (!eval_map_wq) {
		pr_err("Unable to allocate eval_map_wq\n");
		/* Do work here */
		eval_map_work_func(&eval_map_work);
		return -ENOMEM;
	}

	queue_work(eval_map_wq, &eval_map_work);
	return 0;
}

static int __init trace_eval_sync(void)
{
	/* Make sure the eval map updates are finished */
	if (eval_map_wq)
		destroy_workqueue(eval_map_wq);
	return 0;
}

late_initcall_sync(trace_eval_sync);
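/*
 * Rough picture of what the eval maps above are for (descriptive only):
 * trace event format strings may contain enum names and sizeof()
 * expressions, and the eval maps let those be resolved to numeric values
 * so user space can parse the formats. When CONFIG_TRACE_EVAL_MAP_FILE is
 * enabled, the resolved mappings can be inspected, e.g.:
 *
 *	# cat /sys/kernel/tracing/eval_map
 */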


#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created; do
	 * not bother with their enums (eval maps) either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	mutex_lock(&trace_eval_mutex);

	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

static __init int tracer_init_tracefs(void)
{
	int ret;

	trace_access_lock_init();

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	event_trace_init();

	init_tracer_tracefs(&global_trace, NULL);
	ftrace_init_tracefs_toplevel(&global_trace, NULL);

	trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", TRACE_MODE_READ, NULL,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
			NULL, &tracing_saved_tgids_fops);

	trace_eval_init();

	trace_create_eval_file(NULL);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
			NULL, &tracing_dyn_info_fops);
#endif

	create_trace_instances(NULL);

	update_tracer_options(&global_trace);

	return 0;
}

fs_initcall(tracer_init_tracefs);
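/*
 * A few illustrative uses of the top level files created above (example
 * commands, not part of this file):
 *
 *	# cat /sys/kernel/tracing/README                 (built-in help text)
 *	# cat /sys/kernel/tracing/saved_cmdlines         (recorded pid -> comm)
 *	# echo 1024 > /sys/kernel/tracing/saved_cmdlines_size
 *
 * The last command resizes how many command lines are remembered.
 */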

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.next		= NULL,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
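/*
 * The two notifiers above only act when ftrace_dump_on_oops is set. That
 * is typically done with the "ftrace_dump_on_oops" kernel command line
 * option or, at run time, via the kernel.ftrace_dump_on_oops sysctl
 * (both shown here purely as illustrations):
 *
 *	ftrace_dump_on_oops                      (boot parameter)
 *	# sysctl kernel.ftrace_dump_on_oops=1    (at run time)
 */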

/*
 * printk() lines are limited to a maximum of 1024 bytes, but we really
 * don't need it that big: nothing should be printing 1000 characters
 * anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero terminated, but we are paranoid */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->array_buffer = &global_trace.array_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);
	/* Cannot use kmalloc for iter.temp and iter.fmt */
	iter.temp = static_temp_buf;
	iter.temp_size = STATIC_TEMP_BUF_SIZE;
	iter.fmt = static_fmt_buf;
	iter.fmt_size = STATIC_FMT_BUF_SIZE;

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We read everything we can,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
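/*
 * Illustrative caller sketch (not part of this file): a subsystem that
 * hits a rare, hard to reproduce error can dump the trace buffer of the
 * CPU it is running on. The condition name below is hypothetical:
 *
 *	if (WARN_ON(unexpected_hw_state))
 *		ftrace_dump(DUMP_ORIG);
 *
 * A dump of all CPU buffers can also be triggered from user space with
 * sysrq-z while tracing is active.
 */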

#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = createfn(buf);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
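/*
 * Illustrative sketch (not part of this file): a tracefs write handler
 * built on trace_parse_run_command() only has to supply a createfn
 * callback, which is handed one newline- and comment-stripped command at
 * a time. Both names below are made up for the example:
 *
 *	static int example_create_cmd(const char *raw_command)
 *	{
 *		pr_info("got command: %s\n", raw_command);
 *		return 0;	// a non-zero return aborts the whole write
 *	}
 *
 *	static ssize_t example_write(struct file *file, const char __user *buf,
 *				     size_t count, loff_t *ppos)
 *	{
 *		return trace_parse_run_command(file, buf, count, ppos,
 *					       example_create_cmd);
 *	}
 *
 * This is roughly how command files such as kprobe_events use the helper.
 */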

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callback allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUs */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}
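	/*
	 * For reference (illustrative, not new behaviour): trace_boot_clock
	 * is filled in from the "trace_clock=" kernel command line
	 * parameter, so booting with e.g. "trace_clock=global" lands in the
	 * block above.
	 */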

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	test_can_verify();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}

void __init trace_init(void)
{
	trace_event_init();
}

__init static void clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name points into an init section.
	 * This function is called from a late initcall. If the boot
	 * tracer was never found by then, clear it out, to prevent a
	 * later registration from accessing the buffer that is about
	 * to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;
}

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif

__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);