// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer (such as trace_printk()) could
 * occur at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If boot-time tracing (including tracers/events set up via the kernel
 * cmdline) is running, we do not want to run the selftests.
 */
bool __read_mostly tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
void __init disable_tracing_selftest(const char *reason)
{
	if (!tracing_selftest_disabled) {
		tracing_selftest_disabled = true;
		pr_info("Ftrace startup test is disabled due to %s\n", reason);
	}
}
#endif

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static bool tracepoint_printk_stop_on_boot __initdata;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * tracing_disabled kills all tracing for good (never come back).
 * It is initialized to 1, and is set back to zero only when the
 * initialization of the tracer succeeds; that is the only place
 * that clears it.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default; you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set it to 1 if you want to dump the buffers of all CPUs
 * Set it to 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * from "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
				   unsigned int trace_ctx);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str || !strcmp("1", str)) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

static int __init set_tracepoint_printk_stop(char *str)
{
	tracepoint_printk_stop_on_boot = true;
	return 1;
}
__setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);

unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

static void
trace_process_export(struct trace_export *export,
	       struct ring_buffer_event *event, int flag)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	if (export->flags & flag) {
		entry = ring_buffer_event_data(event);
		size = ring_buffer_event_length(event);
		export->write(export, entry, size);
	}
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);

static inline void ftrace_exports_enable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_inc(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_inc(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_inc(&trace_marker_exports_enabled);
}

static inline void ftrace_exports_disable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_dec(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_dec(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_dec(&trace_marker_exports_enabled);
}

static void ftrace_exports(struct ring_buffer_event *event, int flag)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_check(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event, flag);
		export = rcu_dereference_raw_check(export->next);
	}

	preempt_enable_notrace();
}

static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are inserting export into the list, but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer inserted into the list.
	 */
	rcu_assign_pointer(*list, export);
}

static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	ftrace_exports_enable(export);

	add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	ftrace_exports_disable(export);

	return ret;
}

int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
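
/*
 * A minimal usage sketch for the export interface above (the tracer_export
 * name and my_write() callback are illustrative, not part of this file):
 * a module fills in a struct trace_export with a ->write() callback, as
 * invoked by trace_process_export(), selects the event classes it wants
 * via ->flags, and then registers it:
 *
 *	static void my_write(struct trace_export *export,
 *			     const void *entry, unsigned int size)
 *	{
 *		// push "size" bytes of the raw trace entry to some transport
 *	}
 *
 *	static struct trace_export tracer_export = {
 *		.write = my_write,
 *		.flags = TRACE_EXPORT_FUNCTION | TRACE_EXPORT_EVENT,
 *	};
 *
 *	// module init:  register_ftrace_export(&tracer_export);
 *	// module exit:  unregister_ftrace_export(&tracer_export);
 */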

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |		\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |			\
	 TRACE_ITER_HASH_PTR)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

/**
 * trace_array_put - Decrement the reference counter for this trace array.
 * @this_tr: pointer to the trace array
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 */
void trace_array_put(struct trace_array *this_tr)
{
	if (!this_tr)
		return;

	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);
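
/*
 * A minimal sketch of the intended get/put pairing (the "my_instance" name
 * is illustrative, not part of this file): a user of the kernel tracing API
 * looks up or creates an instance, uses it, and drops the reference when done:
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (!tr)
 *		return -ENOMEM;
 *
 *	// ... use tr, e.g. trace_array_printk(tr, ...) ...
 *
 *	trace_array_put(tr);
 */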

int tracing_check_open_get_tr(struct trace_array *tr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	return trace_pid_list_is_set(filtered_pids, search_pid);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids not to be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
		       struct trace_pid_list *filtered_no_pids,
		       struct task_struct *task)
{
	/*
	 * If filtered_no_pids is not empty, and the task's pid is listed
	 * in filtered_no_pids, then return true.
	 * Otherwise, if filtered_pids is empty, that means we can
	 * trace all tasks. If it has content, then only trace pids
	 * within filtered_pids.
	 */

	return (filtered_pids &&
		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
		(filtered_no_pids &&
		 trace_find_filtered_pid(filtered_no_pids, task->pid));
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* "self" is set for forks, and NULL for exits */
	if (self)
		trace_pid_list_set(pid_list, task->pid);
	else
		trace_pid_list_clear(pid_list, task->pid);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	long pid = (unsigned long)v;
	unsigned int next;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	if (trace_pid_list_next(pid_list, pid, &next) < 0)
		return NULL;

	pid = next;

	/* Return pid + 1 to allow zero to be represented */
	return (void *)(pid + 1);
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	unsigned int first;
	loff_t l = 0;

	if (trace_pid_list_first(pid_list, &first) < 0)
		return NULL;

	pid = first;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
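
/*
 * A minimal sketch of how the three helpers above are typically wired into
 * a seq_file interface (the example_* names and the empty ->stop() callback
 * are illustrative, not part of this file; example_pid_list stands for a
 * caller-owned struct trace_pid_list pointer):
 *
 *	static struct trace_pid_list *example_pid_list;
 *
 *	static void *example_pids_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(example_pid_list, pos);
 *	}
 *
 *	static void *example_pids_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(example_pid_list, v, pos);
 *	}
 *
 *	static void example_pids_stop(struct seq_file *m, void *v)
 *	{
 *	}
 *
 *	static const struct seq_operations example_pids_seq_ops = {
 *		.start	= example_pids_start,
 *		.next	= example_pids_next,
 *		.stop	= example_pids_stop,
 *		.show	= trace_pid_show,
 *	};
 */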

/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = trace_pid_list_alloc();
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		ret = trace_pid_list_first(filtered_pids, &pid);
		while (!ret) {
			trace_pid_list_set(pid_list, pid);
			ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
			nr_pids++;
		}
	}

	ret = 0;
	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;

		pid = (pid_t)val;

		if (trace_pid_list_set(pid_list, pid) < 0) {
			ret = -1;
			break;
		}
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_pid_list_free(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_pid_list_free(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}

static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" so that it can be used in fast paths,
 * such as by the irqsoff tracer. But it may be inaccurate due to
 * races. If you need to know the accurate state, use tracing_is_on(),
 * which is a little slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it is much appreciated not to
 * have to wait for all that output. In any case, this is
 * configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * Serialize access to the ring buffer.
 *
 * The ring buffer serializes readers, but that is only low-level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different CPU ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only from read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned int trace_ctx,
				 int skip, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned int trace_ctx,
				      int skip, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
					unsigned int trace_ctx,
					int skip, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long trace_ctx,
				      int skip, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned int trace_ctx)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, trace_ctx);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned int trace_ctx)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, trace_ctx);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_on(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);
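
/*
 * A minimal usage sketch, assuming the usual tracing_off()/tracing_on()
 * pairing (the some_bug_condition name is illustrative, not part of this
 * file): a driver can freeze the ring buffer when it detects a problem, so
 * the events leading up to it are preserved for later inspection, and
 * re-enable recording once it is done:
 *
 *	if (some_bug_condition) {
 *		tracing_off();
 *		// ... inspect or dump state, report the problem ...
 *		tracing_on();
 *	}
 */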


static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		/* ring_buffer_unlock_commit() enables preemption */
		preempt_enable_notrace();
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned int trace_ctx;
	int alloc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	trace_ctx = tracing_gen_ctx();
	buffer = global_trace.array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    trace_ctx);
	if (!event) {
		size = 0;
		goto out;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
 out:
	ring_buffer_nest_end(buffer);
	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into the trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct bputs_entry *entry;
	unsigned int trace_ctx;
	int size = sizeof(struct bputs_entry);
	int ret = 0;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	trace_ctx = tracing_gen_ctx();
	buffer = global_trace.array_buffer.buffer;

	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    trace_ctx);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->str			= str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);

	ret = 1;
 out:
	ring_buffer_nest_end(buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
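
/*
 * A minimal usage sketch: these two helpers are normally reached through the
 * trace_puts() macro rather than called directly; the macro picks
 * __trace_bputs() for build-time constant strings and __trace_puts()
 * otherwise. The condition below is illustrative, not part of this file:
 *
 *	if (unlikely(rare_condition_hit))
 *		trace_puts("rare condition hit, check surrounding events\n");
 */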

#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
					   void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
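
/*
 * A minimal usage sketch, assuming a sleepable setup path (the
 * problem_detected flag is illustrative, not part of this file): allocate
 * the snapshot buffer once during initialization, then snapshot from a
 * fast path when something interesting happens:
 *
 *	// setup (may sleep):
 *	tracing_alloc_snapshot();
 *
 *	// later, in the code being debugged:
 *	if (problem_detected)
 *		tracing_snapshot();
 */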

/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr:		The tracing instance to snapshot
 * @cond_data:	The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr:		The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);

static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) that
	 * we want to preserve.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);

/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr:		The tracing instance
 * @cond_data:	User data to associate with the snapshot
 * @update:	Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
				 cond_update_fn_t update)
{
	struct cond_snapshot *cond_snapshot;
	int ret = 0;

	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
	if (!cond_snapshot)
		return -ENOMEM;

	cond_snapshot->cond_data = cond_data;
	cond_snapshot->update = update;

	mutex_lock(&trace_types_lock);

	ret = tracing_alloc_snapshot_instance(tr);
	if (ret)
		goto fail_unlock;

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	/*
	 * The cond_snapshot can only change to NULL without the
	 * trace_types_lock. We don't care if we race with it going
	 * to NULL, but we want to make sure that it's not set to
	 * something other than NULL when we get here, which we can
	 * do safely with only holding the trace_types_lock and not
	 * having to take the max_lock.
	 */
	if (tr->cond_snapshot) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	arch_spin_lock(&tr->max_lock);
	tr->cond_snapshot = cond_snapshot;
	arch_spin_unlock(&tr->max_lock);

	mutex_unlock(&trace_types_lock);

	return ret;

 fail_unlock:
	mutex_unlock(&trace_types_lock);
	kfree(cond_snapshot);
	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1335
1336/**
1337 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1338 * @tr: The tracing instance
1339 *
1340 * Check whether the conditional snapshot for the given instance is
1341 * enabled; if so, free the cond_snapshot associated with it,
1342 * otherwise return -EINVAL.
1343 *
1344 * Returns 0 if successful, error otherwise.
1345 */
1346int tracing_snapshot_cond_disable(struct trace_array *tr)
1347{
1348 int ret = 0;
1349
1350 arch_spin_lock(&tr->max_lock);
1351
1352 if (!tr->cond_snapshot)
1353 ret = -EINVAL;
1354 else {
1355 kfree(tr->cond_snapshot);
1356 tr->cond_snapshot = NULL;
1357 }
1358
1359 arch_spin_unlock(&tr->max_lock);
1360
1361 return ret;
1362}
1363EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
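
/*
 * Illustrative sketch (not part of the original file): a conditional
 * snapshot that only swaps buffers when an observed value exceeds a
 * threshold.  The threshold pointer is stashed as cond_data at enable
 * time, and the observed value would be passed by a (hypothetical)
 * caller via tracing_snapshot_cond().  Note that update() runs with
 * tr->max_lock held, so the saved data is read directly rather than
 * through tracing_cond_snapshot_data().
 */
static bool example_snapshot_update(struct trace_array *tr, void *cond_data)
{
	unsigned long observed = (unsigned long)cond_data;
	unsigned long *threshold = tr->cond_snapshot->cond_data;

	return observed > *threshold;
}

static __maybe_unused int example_enable_cond_snapshot(struct trace_array *tr,
							unsigned long *threshold)
{
	/* A later tracing_snapshot_cond(tr, (void *)observed) consults update() */
	return tracing_snapshot_cond_enable(tr, threshold,
					    example_snapshot_update);
}
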
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001364#else
1365void tracing_snapshot(void)
1366{
1367 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1368}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001369EXPORT_SYMBOL_GPL(tracing_snapshot);
Tom Zanussia35873a2019-02-13 17:42:45 -06001370void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1371{
1372 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1373}
1374EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
Tom Zanussi93e31ff2013-10-24 08:59:26 -05001375int tracing_alloc_snapshot(void)
1376{
1377 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1378 return -ENODEV;
1379}
1380EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001381void tracing_snapshot_alloc(void)
1382{
1383 /* Give warning */
1384 tracing_snapshot();
1385}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -05001386EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Tom Zanussia35873a2019-02-13 17:42:45 -06001387void *tracing_cond_snapshot_data(struct trace_array *tr)
1388{
1389 return NULL;
1390}
1391EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1392int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1393{
1394 return -ENODEV;
1395}
1396EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1397int tracing_snapshot_cond_disable(struct trace_array *tr)
1398{
 1399	return -ENODEV;
1400}
1401EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -05001402#endif /* CONFIG_TRACER_SNAPSHOT */
1403
Steven Rostedt (VMware)2290f2c2017-04-20 11:46:03 -04001404void tracer_tracing_off(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001405{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001406 if (tr->array_buffer.buffer)
1407 ring_buffer_record_off(tr->array_buffer.buffer);
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001408 /*
1409 * This flag is looked at when buffers haven't been allocated
 1410	 * yet, or by some tracers (like irqsoff) that just want to
 1411	 * know if the ring buffer has been disabled, but can handle
 1412	 * races where it gets disabled while a record is still made.
1413 * As the check is in the fast path of the tracers, it is more
1414 * important to be fast than accurate.
1415 */
1416 tr->buffer_disabled = 1;
1417 /* Make the flag seen by readers */
1418 smp_wmb();
1419}
1420
Steven Rostedt499e5472012-02-22 15:50:28 -05001421/**
1422 * tracing_off - turn off tracing buffers
1423 *
1424 * This function stops the tracing buffers from recording data.
1425 * It does not disable any overhead the tracers themselves may
1426 * be causing. This function simply causes all recording to
1427 * the ring buffers to fail.
1428 */
1429void tracing_off(void)
1430{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001431 tracer_tracing_off(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001432}
1433EXPORT_SYMBOL_GPL(tracing_off);
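
/*
 * Illustrative sketch (not part of the original file): a common
 * debugging pattern is to freeze the ring buffers the moment a
 * suspicious condition is seen, so the events leading up to it stay
 * available for inspection.
 */
static __maybe_unused void example_freeze_trace(bool bad_state)
{
	if (unlikely(bad_state)) {
		trace_printk("bad state hit, freezing trace\n");
		tracing_off();
	}
}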
1434
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001435void disable_trace_on_warning(void)
1436{
Steven Rostedt (VMware)c2007842020-05-29 10:46:32 -04001437 if (__disable_trace_on_warning) {
1438 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1439 "Disabling tracing due to warning\n");
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001440 tracing_off();
Steven Rostedt (VMware)c2007842020-05-29 10:46:32 -04001441 }
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -04001442}
1443
Steven Rostedt499e5472012-02-22 15:50:28 -05001444/**
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001445 * tracer_tracing_is_on - show real state of ring buffer enabled
 1446 * @tr: the trace array to check
 1447 *
 1448 * Shows the real state of the ring buffer, i.e. whether recording is on.
1449 */
Steven Rostedt (VMware)ec573502018-08-01 16:08:57 -04001450bool tracer_tracing_is_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001451{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001452 if (tr->array_buffer.buffer)
1453 return ring_buffer_record_is_on(tr->array_buffer.buffer);
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001454 return !tr->buffer_disabled;
1455}
1456
Steven Rostedt499e5472012-02-22 15:50:28 -05001457/**
1458 * tracing_is_on - show state of ring buffers enabled
1459 */
1460int tracing_is_on(void)
1461{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04001462 return tracer_tracing_is_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -05001463}
1464EXPORT_SYMBOL_GPL(tracing_is_on);
1465
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001466static int __init set_buf_size(char *str)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001467{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001468 unsigned long buf_size;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001469
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001470 if (!str)
1471 return 0;
Li Zefan9d612be2009-06-24 17:33:15 +08001472 buf_size = memparse(str, &str);
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001473 /* nr_entries can not be zero */
Li Zefan9d612be2009-06-24 17:33:15 +08001474 if (buf_size == 0)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02001475 return 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001476 trace_buf_size = buf_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001477 return 1;
1478}
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001479__setup("trace_buf_size=", set_buf_size);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001480
Tim Bird0e950172010-02-25 15:36:43 -08001481static int __init set_tracing_thresh(char *str)
1482{
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001483 unsigned long threshold;
Tim Bird0e950172010-02-25 15:36:43 -08001484 int ret;
1485
1486 if (!str)
1487 return 0;
Daniel Walterbcd83ea2012-09-26 22:08:38 +02001488 ret = kstrtoul(str, 0, &threshold);
Tim Bird0e950172010-02-25 15:36:43 -08001489 if (ret < 0)
1490 return 0;
Wang Tianhong87abb3b2012-08-02 14:02:00 +08001491 tracing_thresh = threshold * 1000;
Tim Bird0e950172010-02-25 15:36:43 -08001492 return 1;
1493}
1494__setup("tracing_thresh=", set_tracing_thresh);
1495
Steven Rostedt57f50be2008-05-12 21:20:44 +02001496unsigned long nsecs_to_usecs(unsigned long nsecs)
1497{
1498 return nsecs / 1000;
1499}
1500
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001501/*
1502 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001503 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001504 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
Jeremy Lintonf57a4142017-05-31 16:56:48 -05001505 * of strings in the order that the evals (enum) were defined.
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001506 */
1507#undef C
1508#define C(a, b) b
1509
Ingo Molnarf2cc0202021-03-23 18:49:35 +01001510/* These must match the bit positions in trace_iterator_flags */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001511static const char *trace_options[] = {
Steven Rostedt (Red Hat)a3418a32015-09-29 09:43:30 -04001512 TRACE_FLAGS
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001513 NULL
1514};
1515
Zhaolei5079f322009-08-25 16:12:56 +08001516static struct {
1517 u64 (*func)(void);
1518 const char *name;
David Sharp8be07092012-11-13 12:18:22 -08001519 int in_ns; /* is this clock in nanoseconds? */
Zhaolei5079f322009-08-25 16:12:56 +08001520} trace_clocks[] = {
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001521 { trace_clock_local, "local", 1 },
1522 { trace_clock_global, "global", 1 },
1523 { trace_clock_counter, "counter", 0 },
Linus Torvaldse7fda6c2014-08-05 17:46:42 -07001524 { trace_clock_jiffies, "uptime", 0 },
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +00001525 { trace_clock, "perf", 1 },
1526 { ktime_get_mono_fast_ns, "mono", 1 },
Drew Richardsonaabfa5f2015-05-08 07:30:39 -07001527 { ktime_get_raw_fast_ns, "mono_raw", 1 },
Thomas Gleixnera3ed0e432018-04-25 15:33:38 +02001528 { ktime_get_boot_fast_ns, "boot", 1 },
David Sharp8cbd9cc2012-11-13 12:18:21 -08001529 ARCH_TRACE_CLOCKS
Zhaolei5079f322009-08-25 16:12:56 +08001530};
1531
Tom Zanussi860f9f62018-01-15 20:51:48 -06001532bool trace_clock_in_ns(struct trace_array *tr)
1533{
1534 if (trace_clocks[tr->clock_id].in_ns)
1535 return true;
1536
1537 return false;
1538}
1539
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001540/*
1541 * trace_parser_get_init - gets the buffer for trace parser
1542 */
1543int trace_parser_get_init(struct trace_parser *parser, int size)
1544{
1545 memset(parser, 0, sizeof(*parser));
1546
1547 parser->buffer = kmalloc(size, GFP_KERNEL);
1548 if (!parser->buffer)
1549 return 1;
1550
1551 parser->size = size;
1552 return 0;
1553}
1554
1555/*
1556 * trace_parser_put - frees the buffer for trace parser
1557 */
1558void trace_parser_put(struct trace_parser *parser)
1559{
1560 kfree(parser->buffer);
Steven Rostedt (VMware)0e684b62017-02-02 17:58:18 -05001561 parser->buffer = NULL;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001562}
1563
1564/*
1565 * trace_get_user - reads the user input string separated by space
1566 * (matched by isspace(ch))
1567 *
 1568 * For each string found, the 'struct trace_parser' is updated,
1569 * and the function returns.
1570 *
1571 * Returns number of bytes read.
1572 *
1573 * See kernel/trace/trace.h for 'struct trace_parser' details.
1574 */
1575int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1576 size_t cnt, loff_t *ppos)
1577{
1578 char ch;
1579 size_t read = 0;
1580 ssize_t ret;
1581
1582 if (!*ppos)
1583 trace_parser_clear(parser);
1584
1585 ret = get_user(ch, ubuf++);
1586 if (ret)
1587 goto out;
1588
1589 read++;
1590 cnt--;
1591
1592 /*
1593 * The parser is not finished with the last write,
1594 * continue reading the user input without skipping spaces.
1595 */
1596 if (!parser->cont) {
1597 /* skip white space */
1598 while (cnt && isspace(ch)) {
1599 ret = get_user(ch, ubuf++);
1600 if (ret)
1601 goto out;
1602 read++;
1603 cnt--;
1604 }
1605
Changbin Du76638d92018-01-16 17:02:29 +08001606 parser->idx = 0;
1607
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001608 /* only spaces were written */
Changbin Du921a7ac2018-01-16 17:02:28 +08001609 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001610 *ppos += read;
1611 ret = read;
1612 goto out;
1613 }
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001614 }
1615
1616 /* read the non-space input */
Changbin Du921a7ac2018-01-16 17:02:28 +08001617 while (cnt && !isspace(ch) && ch) {
Li Zefan3c235a32009-09-22 13:51:54 +08001618 if (parser->idx < parser->size - 1)
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001619 parser->buffer[parser->idx++] = ch;
1620 else {
1621 ret = -EINVAL;
1622 goto out;
1623 }
1624 ret = get_user(ch, ubuf++);
1625 if (ret)
1626 goto out;
1627 read++;
1628 cnt--;
1629 }
1630
1631 /* We either got finished input or we have to wait for another call. */
Changbin Du921a7ac2018-01-16 17:02:28 +08001632 if (isspace(ch) || !ch) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001633 parser->buffer[parser->idx] = 0;
1634 parser->cont = false;
Steven Rostedt057db842013-10-09 22:23:23 -04001635 } else if (parser->idx < parser->size - 1) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001636 parser->cont = true;
1637 parser->buffer[parser->idx++] = ch;
Changbin Duf4d07062018-01-16 17:02:30 +08001638 /* Make sure the parsed string always terminates with '\0'. */
1639 parser->buffer[parser->idx] = 0;
Steven Rostedt057db842013-10-09 22:23:23 -04001640 } else {
1641 ret = -EINVAL;
1642 goto out;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +02001643 }
1644
1645 *ppos += read;
1646 ret = read;
1647
1648out:
1649 return ret;
1650}
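
/*
 * Illustrative sketch (not part of the original file): the typical
 * write() handler pattern built on trace_get_user().  Everything named
 * example_* is hypothetical; the parser helpers are the real APIs.
 */
static __maybe_unused ssize_t
example_token_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read;

	if (trace_parser_get_init(&parser, TASK_COMM_LEN))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	/* A complete, space-terminated token was read */
	if (read >= 0 && trace_parser_loaded(&parser))
		pr_info("token: %s\n", parser.buffer);

	trace_parser_put(&parser);

	return read;
}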
1651
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001652/* TODO add a seq_buf_to_buffer() */
Dmitri Vorobievb8b94262009-03-22 19:11:11 +02001653static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001654{
1655 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001656
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001657 if (trace_seq_used(s) <= s->seq.readpos)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001658 return -EBUSY;
1659
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001660 len = trace_seq_used(s) - s->seq.readpos;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001661 if (cnt > len)
1662 cnt = len;
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001663 memcpy(buf, s->buffer + s->seq.readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001664
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001665 s->seq.readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001666 return cnt;
1667}
1668
Tim Bird0e950172010-02-25 15:36:43 -08001669unsigned long __read_mostly tracing_thresh;
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001670static const struct file_operations tracing_max_lat_fops;
1671
Steven Rostedt (VMware)6880c982021-06-25 19:47:33 -04001672#ifdef LATENCY_FS_NOTIFY
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001673
1674static struct workqueue_struct *fsnotify_wq;
1675
1676static void latency_fsnotify_workfn(struct work_struct *work)
1677{
1678 struct trace_array *tr = container_of(work, struct trace_array,
1679 fsnotify_work);
Amir Goldstein82ace1e2020-07-22 15:58:44 +03001680 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001681}
1682
1683static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1684{
1685 struct trace_array *tr = container_of(iwork, struct trace_array,
1686 fsnotify_irqwork);
1687 queue_work(fsnotify_wq, &tr->fsnotify_work);
1688}
1689
1690static void trace_create_maxlat_file(struct trace_array *tr,
1691 struct dentry *d_tracer)
1692{
1693 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1694 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04001695 tr->d_max_latency = trace_create_file("tracing_max_latency",
1696 TRACE_MODE_WRITE,
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001697 d_tracer, &tr->max_latency,
1698 &tracing_max_lat_fops);
1699}
1700
1701__init static int latency_fsnotify_init(void)
1702{
1703 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1704 WQ_UNBOUND | WQ_HIGHPRI, 0);
1705 if (!fsnotify_wq) {
1706 pr_err("Unable to allocate tr_max_lat_wq\n");
1707 return -ENOMEM;
1708 }
1709 return 0;
1710}
1711
1712late_initcall_sync(latency_fsnotify_init);
1713
1714void latency_fsnotify(struct trace_array *tr)
1715{
1716 if (!fsnotify_wq)
1717 return;
1718 /*
1719 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1720 * possible that we are called from __schedule() or do_idle(), which
1721 * could cause a deadlock.
1722 */
1723 irq_work_queue(&tr->fsnotify_irqwork);
1724}
1725
Jackie Liu424b6502021-09-22 10:51:22 +08001726#elif defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
1727 || defined(CONFIG_OSNOISE_TRACER)
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001728
1729#define trace_create_maxlat_file(tr, d_tracer) \
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04001730 trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
1731 d_tracer, &tr->max_latency, &tracing_max_lat_fops)
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001732
Jackie Liu424b6502021-09-22 10:51:22 +08001733#else
1734#define trace_create_maxlat_file(tr, d_tracer) do { } while (0)
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001735#endif
Tim Bird0e950172010-02-25 15:36:43 -08001736
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001737#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001738/*
1739 * Copy the new maximum trace into the separate maximum-trace
1740 * structure. (this way the maximum trace is permanently saved,
Chunyu Hu5a93bae2017-10-19 14:32:33 +08001741 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001742 */
1743static void
1744__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1745{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001746 struct array_buffer *trace_buf = &tr->array_buffer;
1747 struct array_buffer *max_buf = &tr->max_buffer;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001748 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1749 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001750
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001751 max_buf->cpu = cpu;
1752 max_buf->time_start = data->preempt_timestamp;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001753
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05001754 max_data->saved_latency = tr->max_latency;
Steven Rostedt8248ac02009-09-02 12:27:41 -04001755 max_data->critical_start = data->critical_start;
1756 max_data->critical_end = data->critical_end;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001757
Tom Zanussi85f726a2019-03-05 10:12:00 -06001758 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
Steven Rostedt8248ac02009-09-02 12:27:41 -04001759 max_data->pid = tsk->pid;
Steven Rostedt (Red Hat)f17a5192013-05-30 21:10:37 -04001760 /*
1761 * If tsk == current, then use current_uid(), as that does not use
1762 * RCU. The irq tracer can be called out of RCU scope.
1763 */
1764 if (tsk == current)
1765 max_data->uid = current_uid();
1766 else
1767 max_data->uid = task_uid(tsk);
1768
Steven Rostedt8248ac02009-09-02 12:27:41 -04001769 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1770 max_data->policy = tsk->policy;
1771 max_data->rt_priority = tsk->rt_priority;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001772
 1773	/* record this task's comm */
1774 tracing_record_cmdline(tsk);
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02001775 latency_fsnotify(tr);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001776}
1777
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001778/**
1779 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 1780 * @tr: the trace array
1781 * @tsk: the task with the latency
1782 * @cpu: The cpu that initiated the trace.
Tom Zanussia35873a2019-02-13 17:42:45 -06001783 * @cond_data: User data associated with a conditional snapshot
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001784 *
1785 * Flip the buffers between the @tr and the max_tr and record information
1786 * about which task was the cause of this latency.
1787 */
Ingo Molnare309b412008-05-12 21:20:51 +02001788void
Tom Zanussia35873a2019-02-13 17:42:45 -06001789update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1790 void *cond_data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001791{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001792 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001793 return;
1794
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001795 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt34600f02013-01-22 13:35:11 -05001796
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05001797 if (!tr->allocated_snapshot) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001798 /* Only the nop tracer should hit this when disabling */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001799 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001800 return;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001801 }
Steven Rostedt34600f02013-01-22 13:35:11 -05001802
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001803 arch_spin_lock(&tr->max_lock);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001804
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001805 /* Inherit the recordable setting from array_buffer */
1806 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
Masami Hiramatsu73c8d892018-07-14 01:28:15 +09001807 ring_buffer_record_on(tr->max_buffer.buffer);
1808 else
1809 ring_buffer_record_off(tr->max_buffer.buffer);
1810
Tom Zanussia35873a2019-02-13 17:42:45 -06001811#ifdef CONFIG_TRACER_SNAPSHOT
1812 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1813 goto out_unlock;
1814#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001815 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001816
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001817 __update_max_tr(tr, tsk, cpu);
Tom Zanussia35873a2019-02-13 17:42:45 -06001818
1819 out_unlock:
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001820 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001821}
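
/*
 * Illustrative sketch (not part of the original file): how a latency
 * tracer would typically report a new worst case.  Must be called with
 * interrupts disabled, as update_max_tr() expects.
 */
static __maybe_unused void
example_report_max_latency(struct trace_array *tr, struct task_struct *tsk,
			   int cpu, unsigned long delta)
{
	if (delta <= tr->max_latency)
		return;

	tr->max_latency = delta;
	update_max_tr(tr, tsk, cpu, NULL);
}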
1822
1823/**
1824 * update_max_tr_single - only copy one trace over, and reset the rest
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07001825 * @tr: the trace array
1826 * @tsk: task with the latency
1827 * @cpu: the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001828 *
1829 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001830 */
Ingo Molnare309b412008-05-12 21:20:51 +02001831void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001832update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1833{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001834 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001835
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001836 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001837 return;
1838
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001839 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001840 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001841 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001842 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001843 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001844 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001845
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001846 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001847
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001848 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001849
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001850 if (ret == -EBUSY) {
1851 /*
1852 * We failed to swap the buffer due to a commit taking
1853 * place on this CPU. We fail to record, but we reset
1854 * the max trace buffer (no one writes directly to it)
1855 * and flag that it failed.
1856 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001857 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001858 "Failed to swap buffers due to commit in progress\n");
1859 }
1860
Steven Rostedte8165dbb2009-09-03 19:13:05 -04001861 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001862
1863 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001864 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001865}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001866#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001867
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05001868static int wait_on_pipe(struct trace_iterator *iter, int full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001869{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001870 /* Iterators are static, they should be filled or empty */
1871 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001872 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001873
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001874 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
Rabin Vincente30f53a2014-11-10 19:46:34 +01001875 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001876}
1877
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001878#ifdef CONFIG_FTRACE_STARTUP_TEST
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001879static bool selftests_can_run;
1880
1881struct trace_selftests {
1882 struct list_head list;
1883 struct tracer *type;
1884};
1885
1886static LIST_HEAD(postponed_selftests);
1887
1888static int save_selftest(struct tracer *type)
1889{
1890 struct trace_selftests *selftest;
1891
1892 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1893 if (!selftest)
1894 return -ENOMEM;
1895
1896 selftest->type = type;
1897 list_add(&selftest->list, &postponed_selftests);
1898 return 0;
1899}
1900
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001901static int run_tracer_selftest(struct tracer *type)
1902{
1903 struct trace_array *tr = &global_trace;
1904 struct tracer *saved_tracer = tr->current_trace;
1905 int ret;
1906
1907 if (!type->selftest || tracing_selftest_disabled)
1908 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001909
1910 /*
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001911 * If a tracer registers early in boot up (before scheduling is
1912 * initialized and such), then do not run its selftests yet.
1913 * Instead, run it a little later in the boot process.
1914 */
1915 if (!selftests_can_run)
1916 return save_selftest(type);
1917
Steven Rostedt (VMware)ee666a12021-03-01 10:49:35 -05001918 if (!tracing_is_on()) {
1919 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
1920 type->name);
1921 return 0;
1922 }
1923
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001924 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001925 * Run a selftest on this tracer.
1926 * Here we reset the trace buffer, and set the current
1927 * tracer to be this tracer. The tracer can then run some
1928 * internal tracing to verify that everything is in order.
1929 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001930 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001931 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001932
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001933 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001934
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001935#ifdef CONFIG_TRACER_MAX_TRACE
1936 if (type->use_max_tr) {
1937 /* If we expanded the buffers, make sure the max is expanded too */
1938 if (ring_buffer_expanded)
1939 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1940 RING_BUFFER_ALL_CPUS);
1941 tr->allocated_snapshot = true;
1942 }
1943#endif
1944
1945 /* the test is responsible for initializing and enabling */
1946 pr_info("Testing tracer %s: ", type->name);
1947 ret = type->selftest(type, tr);
1948 /* the test is responsible for resetting too */
1949 tr->current_trace = saved_tracer;
1950 if (ret) {
1951 printk(KERN_CONT "FAILED!\n");
1952 /* Add the warning after printing 'FAILED' */
1953 WARN_ON(1);
1954 return -1;
1955 }
1956 /* Only reset on passing, to avoid touching corrupted buffers */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05001957 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001958
1959#ifdef CONFIG_TRACER_MAX_TRACE
1960 if (type->use_max_tr) {
1961 tr->allocated_snapshot = false;
1962
1963 /* Shrink the max buffer again */
1964 if (ring_buffer_expanded)
1965 ring_buffer_resize(tr->max_buffer.buffer, 1,
1966 RING_BUFFER_ALL_CPUS);
1967 }
1968#endif
1969
1970 printk(KERN_CONT "PASSED\n");
1971 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001972}
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001973
1974static __init int init_trace_selftests(void)
1975{
1976 struct trace_selftests *p, *n;
1977 struct tracer *t, **last;
1978 int ret;
1979
1980 selftests_can_run = true;
1981
1982 mutex_lock(&trace_types_lock);
1983
1984 if (list_empty(&postponed_selftests))
1985 goto out;
1986
1987 pr_info("Running postponed tracer tests:\n");
1988
Steven Rostedt (VMware)78041c02020-02-20 15:38:01 -05001989 tracing_selftest_running = true;
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001990 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
Anders Roxell6fc21712018-11-30 15:56:22 +01001991 /* This loop can take minutes when sanitizers are enabled, so
 1992	 * let's make sure we allow RCU processing.
1993 */
1994 cond_resched();
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04001995 ret = run_tracer_selftest(p->type);
1996 /* If the test fails, then warn and remove from available_tracers */
1997 if (ret < 0) {
1998 WARN(1, "tracer: %s failed selftest, disabling\n",
1999 p->type->name);
2000 last = &trace_types;
2001 for (t = trace_types; t; t = t->next) {
2002 if (t == p->type) {
2003 *last = t->next;
2004 break;
2005 }
2006 last = &t->next;
2007 }
2008 }
2009 list_del(&p->list);
2010 kfree(p);
2011 }
Steven Rostedt (VMware)78041c02020-02-20 15:38:01 -05002012 tracing_selftest_running = false;
Steven Rostedt (VMware)9afecfb2017-03-24 17:59:10 -04002013
2014 out:
2015 mutex_unlock(&trace_types_lock);
2016
2017 return 0;
2018}
Steven Rostedtb9ef0322017-05-17 11:14:35 -04002019core_initcall(init_trace_selftests);
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05002020#else
2021static inline int run_tracer_selftest(struct tracer *type)
2022{
2023 return 0;
2024}
2025#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002026
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04002027static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2028
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08002029static void __init apply_trace_boot_options(void);
2030
Steven Rostedt4fcdae82008-05-12 21:21:00 +02002031/**
2032 * register_tracer - register a tracer with the ftrace system.
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002033 * @type: the plugin for the tracer
Steven Rostedt4fcdae82008-05-12 21:21:00 +02002034 *
2035 * Register a new plugin tracer.
2036 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08002037int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002038{
2039 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002040 int ret = 0;
2041
2042 if (!type->name) {
2043 pr_info("Tracer must have a name\n");
2044 return -1;
2045 }
2046
Dan Carpenter24a461d2010-07-10 12:06:44 +02002047 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08002048 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2049 return -1;
2050 }
2051
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05002052 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11002053 pr_warn("Can not register tracer %s due to lockdown\n",
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05002054 type->name);
2055 return -EPERM;
2056 }
2057
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002058 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01002059
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01002060 tracing_selftest_running = true;
2061
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002062 for (t = trace_types; t; t = t->next) {
2063 if (strcmp(type->name, t->name) == 0) {
2064 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08002065 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002066 type->name);
2067 ret = -1;
2068 goto out;
2069 }
2070 }
2071
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01002072 if (!type->set_flag)
2073 type->set_flag = &dummy_set_flag;
Chunyu Hud39cdd22016-03-08 21:37:01 +08002074 if (!type->flags) {
 2075		/* allocate a dummy tracer_flags */
2076 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
Chunyu Huc8ca0032016-03-14 20:35:41 +08002077 if (!type->flags) {
2078 ret = -ENOMEM;
2079 goto out;
2080 }
Chunyu Hud39cdd22016-03-08 21:37:01 +08002081 type->flags->val = 0;
2082 type->flags->opts = dummy_tracer_opt;
2083 } else
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01002084 if (!type->flags->opts)
2085 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01002086
Chunyu Hud39cdd22016-03-08 21:37:01 +08002087 /* store the tracer for __set_tracer_option */
2088 type->flags->trace = type;
2089
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05002090 ret = run_tracer_selftest(type);
2091 if (ret < 0)
2092 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02002093
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002094 type->next = trace_types;
2095 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04002096 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02002097
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002098 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01002099 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002100 mutex_unlock(&trace_types_lock);
2101
Steven Rostedtdac74942009-02-05 01:13:38 -05002102 if (ret || !default_bootup_tracer)
2103 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05002104
Li Zefanee6c2c12009-09-18 14:06:47 +08002105 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05002106 goto out_unlock;
2107
2108 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2109 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05002110 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05002111 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08002112
2113 apply_trace_boot_options();
2114
Steven Rostedtdac74942009-02-05 01:13:38 -05002115 /* disable other selftests, since this will break it. */
Masami Hiramatsu60efe212020-12-08 17:54:09 +09002116 disable_tracing_selftest("running a tracer");
Steven Rostedtdac74942009-02-05 01:13:38 -05002117
2118 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002119 return ret;
2120}
2121
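/*
 * Illustrative sketch (not part of the original file): the minimum a
 * plugin has to provide before handing itself to register_tracer().
 * Real tracers also fill in reset/start/stop and option callbacks, and
 * the registration would normally be wired up from an __init path
 * during boot.  Everything named example_* is hypothetical.
 */
static int example_tracer_init(struct trace_array *tr)
{
	return 0;
}

static struct tracer example_tracer __read_mostly = {
	.name		= "example",
	.init		= example_tracer_init,
};

static __init __maybe_unused int example_tracer_register(void)
{
	return register_tracer(&example_tracer);
}
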
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002122static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04002123{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002124 struct trace_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04002125
Hiraku Toyookaa5416412012-12-19 16:02:34 +09002126 if (!buffer)
2127 return;
2128
Steven Rostedtf6339032009-09-04 12:35:16 -04002129 ring_buffer_record_disable(buffer);
2130
2131 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08002132 synchronize_rcu();
Steven Rostedt68179682012-05-08 20:57:53 -04002133 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04002134
2135 ring_buffer_record_enable(buffer);
2136}
2137
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002138void tracing_reset_online_cpus(struct array_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02002139{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002140 struct trace_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02002141
Hiraku Toyookaa5416412012-12-19 16:02:34 +09002142 if (!buffer)
2143 return;
2144
Steven Rostedt621968c2009-09-04 12:02:35 -04002145 ring_buffer_record_disable(buffer);
2146
2147 /* Make sure all commits have finished */
Paul E. McKenney74401722018-11-06 18:44:52 -08002148 synchronize_rcu();
Steven Rostedt621968c2009-09-04 12:02:35 -04002149
Alexander Z Lam94571582013-08-02 18:36:16 -07002150 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02002151
Nicholas Pigginb23d7a5f2020-06-25 15:34:03 +10002152 ring_buffer_reset_online_cpus(buffer);
Steven Rostedt621968c2009-09-04 12:02:35 -04002153
2154 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02002155}
2156
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04002157/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002158void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04002159{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002160 struct trace_array *tr;
2161
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002162 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (VMware)065e63f2017-08-31 17:03:47 -04002163 if (!tr->clear_trace)
2164 continue;
2165 tr->clear_trace = false;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002166 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002167#ifdef CONFIG_TRACER_MAX_TRACE
2168 tracing_reset_online_cpus(&tr->max_buffer);
2169#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05002170 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04002171}
2172
Paul Burton4030a6e2021-07-01 10:24:07 -07002173/*
2174 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
2175 * is the tgid last observed corresponding to pid=i.
2176 */
Joel Fernandesd914ba32017-06-26 19:01:55 -07002177static int *tgid_map;
2178
Paul Burton4030a6e2021-07-01 10:24:07 -07002179/* The maximum valid index into tgid_map. */
2180static size_t tgid_map_max;
2181
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002182#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002183#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01002184static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002185struct saved_cmdlines_buffer {
2186 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2187 unsigned *map_cmdline_to_pid;
2188 unsigned cmdline_num;
2189 int cmdline_idx;
2190 char *saved_cmdlines;
2191};
2192static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02002193
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002194static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002195{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002196 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2197}
2198
2199static inline void set_cmdline(int idx, const char *cmdline)
2200{
Tom Zanussi85f726a2019-03-05 10:12:00 -06002201 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002202}
2203
2204static int allocate_cmdlines_buffer(unsigned int val,
2205 struct saved_cmdlines_buffer *s)
2206{
Kees Cook6da2ec52018-06-12 13:55:00 -07002207 s->map_cmdline_to_pid = kmalloc_array(val,
2208 sizeof(*s->map_cmdline_to_pid),
2209 GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002210 if (!s->map_cmdline_to_pid)
2211 return -ENOMEM;
2212
Kees Cook6da2ec52018-06-12 13:55:00 -07002213 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002214 if (!s->saved_cmdlines) {
2215 kfree(s->map_cmdline_to_pid);
2216 return -ENOMEM;
2217 }
2218
2219 s->cmdline_idx = 0;
2220 s->cmdline_num = val;
2221 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2222 sizeof(s->map_pid_to_cmdline));
2223 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2224 val * sizeof(*s->map_cmdline_to_pid));
2225
2226 return 0;
2227}
2228
2229static int trace_create_savedcmd(void)
2230{
2231 int ret;
2232
Namhyung Kima6af8fb2014-06-10 16:11:35 +09002233 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002234 if (!savedcmd)
2235 return -ENOMEM;
2236
2237 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2238 if (ret < 0) {
2239 kfree(savedcmd);
2240 savedcmd = NULL;
2241 return -ENOMEM;
2242 }
2243
2244 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002245}
2246
Carsten Emdeb5130b12009-09-13 01:43:07 +02002247int is_tracing_stopped(void)
2248{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002249 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02002250}
2251
Steven Rostedt0f048702008-11-05 16:05:44 -05002252/**
2253 * tracing_start - quick start of the tracer
2254 *
2255 * If tracing is enabled but was stopped by tracing_stop,
2256 * this will start the tracer back up.
2257 */
2258void tracing_start(void)
2259{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002260 struct trace_buffer *buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002261 unsigned long flags;
2262
2263 if (tracing_disabled)
2264 return;
2265
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002266 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2267 if (--global_trace.stop_count) {
2268 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05002269 /* Someone screwed up their debugging */
2270 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002271 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05002272 }
Steven Rostedt0f048702008-11-05 16:05:44 -05002273 goto out;
2274 }
2275
Steven Rostedta2f80712010-03-12 19:56:00 -05002276 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002277 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05002278
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002279 buffer = global_trace.array_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002280 if (buffer)
2281 ring_buffer_record_enable(buffer);
2282
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002283#ifdef CONFIG_TRACER_MAX_TRACE
2284 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002285 if (buffer)
2286 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002287#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002288
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002289 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002290
Steven Rostedt0f048702008-11-05 16:05:44 -05002291 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002292 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2293}
2294
2295static void tracing_start_tr(struct trace_array *tr)
2296{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002297 struct trace_buffer *buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002298 unsigned long flags;
2299
2300 if (tracing_disabled)
2301 return;
2302
2303 /* If global, we need to also start the max tracer */
2304 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2305 return tracing_start();
2306
2307 raw_spin_lock_irqsave(&tr->start_lock, flags);
2308
2309 if (--tr->stop_count) {
2310 if (tr->stop_count < 0) {
2311 /* Someone screwed up their debugging */
2312 WARN_ON_ONCE(1);
2313 tr->stop_count = 0;
2314 }
2315 goto out;
2316 }
2317
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002318 buffer = tr->array_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002319 if (buffer)
2320 ring_buffer_record_enable(buffer);
2321
2322 out:
2323 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002324}
2325
2326/**
2327 * tracing_stop - quick stop of the tracer
2328 *
2329 * Light weight way to stop tracing. Use in conjunction with
2330 * tracing_start.
2331 */
2332void tracing_stop(void)
2333{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002334 struct trace_buffer *buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002335 unsigned long flags;
2336
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002337 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2338 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05002339 goto out;
2340
Steven Rostedta2f80712010-03-12 19:56:00 -05002341 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002342 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002343
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002344 buffer = global_trace.array_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002345 if (buffer)
2346 ring_buffer_record_disable(buffer);
2347
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002348#ifdef CONFIG_TRACER_MAX_TRACE
2349 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05002350 if (buffer)
2351 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002352#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05002353
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05002354 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05002355
Steven Rostedt0f048702008-11-05 16:05:44 -05002356 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002357 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2358}
2359
2360static void tracing_stop_tr(struct trace_array *tr)
2361{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002362 struct trace_buffer *buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002363 unsigned long flags;
2364
2365 /* If global, we need to also stop the max tracer */
2366 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2367 return tracing_stop();
2368
2369 raw_spin_lock_irqsave(&tr->start_lock, flags);
2370 if (tr->stop_count++)
2371 goto out;
2372
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05002373 buffer = tr->array_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002374 if (buffer)
2375 ring_buffer_record_disable(buffer);
2376
2377 out:
2378 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05002379}
2380
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002381static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002382{
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002383 unsigned tpid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002384
Joel Fernandeseaf260a2017-07-06 16:00:21 -07002385 /* treat recording of idle task as a success */
2386 if (!tsk->pid)
2387 return 1;
2388
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002389 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002390
2391 /*
2392 * It's not the end of the world if we don't get
2393 * the lock, but we also don't want to spin
2394 * nor do we want to disable interrupts,
2395 * so if we miss here, then better luck next time.
2396 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002397 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002398 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002399
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002400 idx = savedcmd->map_pid_to_cmdline[tpid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01002401 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002402 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002403
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002404 savedcmd->map_pid_to_cmdline[tpid] = idx;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002405 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002406 }
2407
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002408 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09002409 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002410
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002411 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04002412
2413 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002414}
2415
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002416static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002417{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002418 unsigned map;
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002419 int tpid;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002420
Steven Rostedt4ca530852009-03-16 19:20:15 -04002421 if (!pid) {
2422 strcpy(comm, "<idle>");
2423 return;
2424 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002425
Steven Rostedt74bf4072010-01-25 15:11:53 -05002426 if (WARN_ON_ONCE(pid < 0)) {
2427 strcpy(comm, "<XXX>");
2428 return;
2429 }
2430
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002431 tpid = pid & (PID_MAX_DEFAULT - 1);
2432 map = savedcmd->map_pid_to_cmdline[tpid];
2433 if (map != NO_CMDLINE_MAP) {
2434 tpid = savedcmd->map_cmdline_to_pid[map];
2435 if (tpid == pid) {
2436 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2437 return;
2438 }
Steven Rostedt4ca530852009-03-16 19:20:15 -04002439 }
Steven Rostedt (VMware)785e3c02021-04-27 11:32:07 -04002440 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04002441}
2442
2443void trace_find_cmdline(int pid, char comm[])
2444{
2445 preempt_disable();
2446 arch_spin_lock(&trace_cmdline_lock);
2447
2448 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002449
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002450 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02002451 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002452}
2453
Paul Burton4030a6e2021-07-01 10:24:07 -07002454static int *trace_find_tgid_ptr(int pid)
2455{
2456 /*
2457 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2458 * if we observe a non-NULL tgid_map then we also observe the correct
2459 * tgid_map_max.
2460 */
2461 int *map = smp_load_acquire(&tgid_map);
2462
2463 if (unlikely(!map || pid > tgid_map_max))
2464 return NULL;
2465
2466 return &map[pid];
2467}
2468
Joel Fernandesd914ba32017-06-26 19:01:55 -07002469int trace_find_tgid(int pid)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002470{
Paul Burton4030a6e2021-07-01 10:24:07 -07002471 int *ptr = trace_find_tgid_ptr(pid);
Joel Fernandesd914ba32017-06-26 19:01:55 -07002472
Paul Burton4030a6e2021-07-01 10:24:07 -07002473 return ptr ? *ptr : 0;
Joel Fernandesd914ba32017-06-26 19:01:55 -07002474}
2475
2476static int trace_save_tgid(struct task_struct *tsk)
2477{
Paul Burton4030a6e2021-07-01 10:24:07 -07002478 int *ptr;
2479
Joel Fernandesbd45d342017-07-06 16:00:22 -07002480 /* treat recording of idle task as a success */
2481 if (!tsk->pid)
2482 return 1;
2483
Paul Burton4030a6e2021-07-01 10:24:07 -07002484 ptr = trace_find_tgid_ptr(tsk->pid);
2485 if (!ptr)
Joel Fernandesd914ba32017-06-26 19:01:55 -07002486 return 0;
2487
Paul Burton4030a6e2021-07-01 10:24:07 -07002488 *ptr = tsk->tgid;
Joel Fernandesd914ba32017-06-26 19:01:55 -07002489 return 1;
2490}
2491
2492static bool tracing_record_taskinfo_skip(int flags)
2493{
2494 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2495 return true;
Joel Fernandesd914ba32017-06-26 19:01:55 -07002496 if (!__this_cpu_read(trace_taskinfo_save))
2497 return true;
2498 return false;
2499}
2500
2501/**
2502 * tracing_record_taskinfo - record the task info of a task
2503 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002504 * @task: task to record
2505 * @flags: TRACE_RECORD_CMDLINE for recording comm
2506 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002507 */
2508void tracing_record_taskinfo(struct task_struct *task, int flags)
2509{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002510 bool done;
2511
Joel Fernandesd914ba32017-06-26 19:01:55 -07002512 if (tracing_record_taskinfo_skip(flags))
2513 return;
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002514
2515 /*
2516 * Record as much task information as possible. If some fail, continue
2517 * to try to record the others.
2518 */
2519 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2520 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2521
2522 /* If recording any information failed, retry again soon. */
2523 if (!done)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002524 return;
2525
Joel Fernandesd914ba32017-06-26 19:01:55 -07002526 __this_cpu_write(trace_taskinfo_save, false);
2527}
2528
2529/**
2530 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2531 *
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07002532 * @prev: previous task during sched_switch
2533 * @next: next task during sched_switch
2534 * @flags: TRACE_RECORD_CMDLINE for recording comm
2535 * TRACE_RECORD_TGID for recording tgid
Joel Fernandesd914ba32017-06-26 19:01:55 -07002536 */
2537void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2538 struct task_struct *next, int flags)
2539{
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002540 bool done;
2541
Joel Fernandesd914ba32017-06-26 19:01:55 -07002542 if (tracing_record_taskinfo_skip(flags))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002543 return;
2544
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002545 /*
2546 * Record as much task information as possible. If some fail, continue
2547 * to try to record the others.
2548 */
2549 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2550 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2551 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2552 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
Joel Fernandesd914ba32017-06-26 19:01:55 -07002553
Joel Fernandes29b1a8a2017-07-06 16:00:23 -07002554 /* If recording any information failed, retry again soon. */
2555 if (!done)
Joel Fernandesd914ba32017-06-26 19:01:55 -07002556 return;
2557
2558 __this_cpu_write(trace_taskinfo_save, false);
2559}
2560
2561/* Helpers to record a specific task information */
2562void tracing_record_cmdline(struct task_struct *task)
2563{
2564 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2565}
2566
2567void tracing_record_tgid(struct task_struct *task)
2568{
2569 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002570}
2571
Steven Rostedt (VMware)af0009f2017-03-16 11:01:06 -04002572/*
2573 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2574 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2575 * simplifies those functions and keeps them in sync.
2576 */
2577enum print_line_t trace_handle_return(struct trace_seq *s)
2578{
2579 return trace_seq_has_overflowed(s) ?
2580 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2581}
2582EXPORT_SYMBOL_GPL(trace_handle_return);
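/*
 * Illustrative sketch (not part of this file): a typical output callback
 * writes into iter->seq and lets trace_handle_return() map the overflow
 * state to the right return value.  The callback and "struct example_entry"
 * are hypothetical.
 */
static enum print_line_t example_print_line(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct example_entry *field = (struct example_entry *)iter->ent;

	trace_seq_printf(s, "example: ip=%lx\n", field->ip);

	/* One place handles both the overflow and the handled case */
	return trace_handle_return(s);
}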
2583
Thomas Gleixner54357f02021-08-10 15:26:25 +02002584static unsigned short migration_disable_value(void)
2585{
2586#if defined(CONFIG_SMP)
2587 return current->migration_disabled;
2588#else
2589 return 0;
2590#endif
2591}
2592
Sebastian Andrzej Siewior0c020062021-01-25 20:45:09 +01002593unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002594{
Sebastian Andrzej Siewior0c020062021-01-25 20:45:09 +01002595 unsigned int trace_flags = irqs_status;
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002596 unsigned int pc;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002597
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002598 pc = preempt_count();
2599
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002600 if (pc & NMI_MASK)
2601 trace_flags |= TRACE_FLAG_NMI;
2602 if (pc & HARDIRQ_MASK)
2603 trace_flags |= TRACE_FLAG_HARDIRQ;
Sebastian Andrzej Siewiorfe427882021-01-25 20:45:10 +01002604 if (in_serving_softirq())
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002605 trace_flags |= TRACE_FLAG_SOFTIRQ;
Sebastian Andrzej Siewior289e7b02021-12-13 11:08:53 +01002606 if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2607 trace_flags |= TRACE_FLAG_BH_OFF;
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002608
2609 if (tif_need_resched())
2610 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2611 if (test_preempt_need_resched())
2612 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
Thomas Gleixner54357f02021-08-10 15:26:25 +02002613 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2614 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002615}
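/*
 * Illustrative sketch (not part of this file): how a packed trace_ctx word
 * from tracing_gen_ctx_irq_test() could be unpacked.  Bits 0-3 hold the
 * clamped preempt count, bits 4-7 the clamped migration-disable depth, and
 * bits 16 and up the TRACE_FLAG_* bits.  The helper name is hypothetical.
 */
static inline void example_decode_trace_ctx(unsigned int trace_ctx,
					    unsigned int *preempt,
					    unsigned int *migrate,
					    unsigned int *flags)
{
	*preempt = trace_ctx & 0xf;
	*migrate = (trace_ctx >> 4) & 0xf;
	*flags   = trace_ctx >> 16;
}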
2616
Steven Rostedte77405a2009-09-02 14:17:06 -04002617struct ring_buffer_event *
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002618trace_buffer_lock_reserve(struct trace_buffer *buffer,
Steven Rostedte77405a2009-09-02 14:17:06 -04002619 int type,
2620 unsigned long len,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002621 unsigned int trace_ctx)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002622{
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002623 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002624}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02002625
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002626DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2627DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2628static int trace_buffered_event_ref;
2629
2630/**
2631 * trace_buffered_event_enable - enable buffering events
2632 *
 2633 * When events are being filtered, it is quicker to write the event
 2634 * data into a temporary buffer if there is a good chance that the
 2635 * event will not be committed. Discarding an event from the ring
 2636 * buffer is not as fast as committing one, and is much slower than
 2637 * the copy done on a commit.
 2638 *
 2639 * When an event is to be filtered, per-CPU buffers are allocated to
 2640 * write the event data into. If the event is filtered and discarded,
 2641 * it is simply dropped; otherwise, the entire data is committed
 2642 * in one shot.
2643 */
2644void trace_buffered_event_enable(void)
2645{
2646 struct ring_buffer_event *event;
2647 struct page *page;
2648 int cpu;
2649
2650 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2651
2652 if (trace_buffered_event_ref++)
2653 return;
2654
2655 for_each_tracing_cpu(cpu) {
2656 page = alloc_pages_node(cpu_to_node(cpu),
2657 GFP_KERNEL | __GFP_NORETRY, 0);
2658 if (!page)
2659 goto failed;
2660
2661 event = page_address(page);
2662 memset(event, 0, sizeof(*event));
2663
2664 per_cpu(trace_buffered_event, cpu) = event;
2665
2666 preempt_disable();
2667 if (cpu == smp_processor_id() &&
Xianting Tianb427e762020-08-13 19:28:03 +08002668 __this_cpu_read(trace_buffered_event) !=
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002669 per_cpu(trace_buffered_event, cpu))
2670 WARN_ON_ONCE(1);
2671 preempt_enable();
2672 }
2673
2674 return;
2675 failed:
2676 trace_buffered_event_disable();
2677}
2678
2679static void enable_trace_buffered_event(void *data)
2680{
2681 /* Probably not needed, but do it anyway */
2682 smp_rmb();
2683 this_cpu_dec(trace_buffered_event_cnt);
2684}
2685
2686static void disable_trace_buffered_event(void *data)
2687{
2688 this_cpu_inc(trace_buffered_event_cnt);
2689}
2690
2691/**
2692 * trace_buffered_event_disable - disable buffering events
2693 *
2694 * When a filter is removed, it is faster to not use the buffered
2695 * events, and to commit directly into the ring buffer. Free up
2696 * the temp buffers when there are no more users. This requires
2697 * special synchronization with current events.
2698 */
2699void trace_buffered_event_disable(void)
2700{
2701 int cpu;
2702
2703 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2704
2705 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2706 return;
2707
2708 if (--trace_buffered_event_ref)
2709 return;
2710
2711 preempt_disable();
2712 /* For each CPU, set the buffer as used. */
2713 smp_call_function_many(tracing_buffer_mask,
2714 disable_trace_buffered_event, NULL, 1);
2715 preempt_enable();
2716
2717 /* Wait for all current users to finish */
Paul E. McKenney74401722018-11-06 18:44:52 -08002718 synchronize_rcu();
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002719
2720 for_each_tracing_cpu(cpu) {
2721 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2722 per_cpu(trace_buffered_event, cpu) = NULL;
2723 }
2724 /*
2725 * Make sure trace_buffered_event is NULL before clearing
2726 * trace_buffered_event_cnt.
2727 */
2728 smp_wmb();
2729
2730 preempt_disable();
2731 /* Do the work on each cpu */
2732 smp_call_function_many(tracing_buffer_mask,
2733 enable_trace_buffered_event, NULL, 1);
2734 preempt_enable();
2735}
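/*
 * Illustrative sketch (not part of this file): the enable/disable pair
 * above is reference counted and expects event_mutex to be held, e.g.
 * bracketing the lifetime of an event filter.  The surrounding helper is
 * hypothetical.
 */
static void example_filter_lifetime(void)
{
	mutex_lock(&event_mutex);
	trace_buffered_event_enable();		/* take a reference */
	/* ... install a filter; events now go through the temp buffers ... */
	trace_buffered_event_disable();		/* drop the reference */
	mutex_unlock(&event_mutex);
}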
2736
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002737static struct trace_buffer *temp_buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002738
Steven Rostedtef5580d2009-02-27 19:38:04 -05002739struct ring_buffer_event *
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002740trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04002741 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04002742 int type, unsigned long len,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002743 unsigned int trace_ctx)
Steven Rostedtccb469a2012-08-02 10:32:10 -04002744{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002745 struct ring_buffer_event *entry;
Steven Rostedt (VMware)b94bc802021-03-16 12:41:05 -04002746 struct trace_array *tr = trace_file->tr;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002747 int val;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002748
Steven Rostedt (VMware)b94bc802021-03-16 12:41:05 -04002749 *current_rb = tr->array_buffer.buffer;
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002750
Steven Rostedt (VMware)b94bc802021-03-16 12:41:05 -04002751 if (!tr->no_filter_buffering_ref &&
Steven Rostedt (VMware)6c536d72021-11-29 21:39:47 -05002752 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2753 preempt_disable_notrace();
Steven Rostedt (VMware)8f0901c2021-06-09 18:04:59 -04002754 /*
2755 * Filtering is on, so try to use the per cpu buffer first.
2756 * This buffer will simulate a ring_buffer_event,
2757 * where the type_len is zero and the array[0] will
2758 * hold the full length.
2759 * (see include/linux/ring-buffer.h for details on
2760 * how the ring_buffer_event is structured).
2761 *
2762 * Using a temp buffer during filtering and copying it
2763 * on a matched filter is quicker than writing directly
2764 * into the ring buffer and then discarding it when
2765 * it doesn't match. That is because the discard
2766 * requires several atomic operations to get right.
 2767 * Copying on a match and doing nothing on a failed match
 2768 * is still quicker than skipping the copy on a match but
 2769 * having to discard out of the ring buffer on a failed match.
2770 */
Steven Rostedt (VMware)6c536d72021-11-29 21:39:47 -05002771 if ((entry = __this_cpu_read(trace_buffered_event))) {
2772 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
Steven Rostedt (VMware)faa76a62021-06-09 18:04:58 -04002773
Steven Rostedt (VMware)6c536d72021-11-29 21:39:47 -05002774 val = this_cpu_inc_return(trace_buffered_event_cnt);
Steven Rostedt (VMware)8f0901c2021-06-09 18:04:59 -04002775
Steven Rostedt (VMware)6c536d72021-11-29 21:39:47 -05002776 /*
2777 * Preemption is disabled, but interrupts and NMIs
2778 * can still come in now. If that happens after
2779 * the above increment, then it will have to go
2780 * back to the old method of allocating the event
2781 * on the ring buffer, and if the filter fails, it
2782 * will have to call ring_buffer_discard_commit()
2783 * to remove it.
2784 *
2785 * Need to also check the unlikely case that the
2786 * length is bigger than the temp buffer size.
2787 * If that happens, then the reserve is pretty much
2788 * guaranteed to fail, as the ring buffer currently
2789 * only allows events less than a page. But that may
2790 * change in the future, so let the ring buffer reserve
2791 * handle the failure in that case.
2792 */
2793 if (val == 1 && likely(len <= max_len)) {
2794 trace_event_setup(entry, type, trace_ctx);
2795 entry->array[0] = len;
2796 /* Return with preemption disabled */
2797 return entry;
2798 }
2799 this_cpu_dec(trace_buffered_event_cnt);
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002800 }
Steven Rostedt (VMware)6c536d72021-11-29 21:39:47 -05002801 /* __trace_buffer_lock_reserve() disables preemption */
2802 preempt_enable_notrace();
Steven Rostedt (Red Hat)0fc1b092016-05-03 17:15:43 -04002803 }
2804
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002805 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2806 trace_ctx);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002807 /*
 2808 * If tracing is off, but we have triggers enabled,
 2809 * we still need to look at the event data. Use the temp_buffer
Qiujun Huang906695e2020-10-31 16:57:14 +08002810 * to store the trace event for the trigger to use. It's recursion
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002811 * safe and will not be recorded anywhere.
2812 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04002813 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002814 *current_rb = temp_buffer;
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002815 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2816 trace_ctx);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04002817 }
2818 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04002819}
2820EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2821
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002822static DEFINE_SPINLOCK(tracepoint_iter_lock);
2823static DEFINE_MUTEX(tracepoint_printk_mutex);
2824
2825static void output_printk(struct trace_event_buffer *fbuffer)
2826{
2827 struct trace_event_call *event_call;
Masami Hiramatsud8d0c242020-01-11 01:05:18 +09002828 struct trace_event_file *file;
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002829 struct trace_event *event;
2830 unsigned long flags;
2831 struct trace_iterator *iter = tracepoint_print_iter;
2832
2833 /* We should never get here if iter is NULL */
2834 if (WARN_ON_ONCE(!iter))
2835 return;
2836
2837 event_call = fbuffer->trace_file->event_call;
2838 if (!event_call || !event_call->event.funcs ||
2839 !event_call->event.funcs->trace)
2840 return;
2841
Masami Hiramatsud8d0c242020-01-11 01:05:18 +09002842 file = fbuffer->trace_file;
2843 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2844 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2845 !filter_match_preds(file->filter, fbuffer->entry)))
2846 return;
2847
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002848 event = &fbuffer->trace_file->event_call->event;
2849
2850 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2851 trace_seq_init(&iter->seq);
2852 iter->ent = fbuffer->entry;
2853 event_call->event.funcs->trace(iter, 0, event);
2854 trace_seq_putc(&iter->seq, 0);
2855 printk("%s", iter->seq.buffer);
2856
2857 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2858}
2859
2860int tracepoint_printk_sysctl(struct ctl_table *table, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02002861 void *buffer, size_t *lenp,
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002862 loff_t *ppos)
2863{
2864 int save_tracepoint_printk;
2865 int ret;
2866
2867 mutex_lock(&tracepoint_printk_mutex);
2868 save_tracepoint_printk = tracepoint_printk;
2869
2870 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2871
2872 /*
2873 * This will force exiting early, as tracepoint_printk
 2874 * is always zero when tracepoint_print_iter is not allocated
2875 */
2876 if (!tracepoint_print_iter)
2877 tracepoint_printk = 0;
2878
2879 if (save_tracepoint_printk == tracepoint_printk)
2880 goto out;
2881
2882 if (tracepoint_printk)
2883 static_key_enable(&tracepoint_printk_key.key);
2884 else
2885 static_key_disable(&tracepoint_printk_key.key);
2886
2887 out:
2888 mutex_unlock(&tracepoint_printk_mutex);
2889
2890 return ret;
2891}
2892
2893void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2894{
Pingfan Liu6c34df62021-08-14 11:45:38 +08002895 enum event_trigger_type tt = ETT_NONE;
2896 struct trace_event_file *file = fbuffer->trace_file;
2897
2898 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2899 fbuffer->entry, &tt))
2900 goto discard;
2901
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002902 if (static_key_false(&tracepoint_printk_key.key))
2903 output_printk(fbuffer);
2904
Tingwei Zhang8ab7a2b2020-10-05 10:13:14 +03002905 if (static_branch_unlikely(&trace_event_exports_enabled))
2906 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
Pingfan Liu6c34df62021-08-14 11:45:38 +08002907
2908 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2909 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2910
2911discard:
2912 if (tt)
2913 event_triggers_post_call(file, tt);
2914
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -05002915}
2916EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2917
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002918/*
2919 * Skip 3:
2920 *
2921 * trace_buffer_unlock_commit_regs()
2922 * trace_event_buffer_commit()
2923 * trace_event_raw_event_xxx()
Rohit Visavalia13cf9122018-01-29 15:11:26 +05302924 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002925# define STACK_SKIP 3
2926
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04002927void trace_buffer_unlock_commit_regs(struct trace_array *tr,
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002928 struct trace_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002929 struct ring_buffer_event *event,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002930 unsigned int trace_ctx,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002931 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002932{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002933 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002934
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002935 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002936 * If regs is not set, then skip the necessary functions.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002937 * Note, we can still get here via blktrace, wakeup tracer
2938 * and mmiotrace, but that's ok if they lose a function or
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05002939 * two. They are not that meaningful.
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04002940 */
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002941 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2942 ftrace_trace_userstack(tr, buffer, trace_ctx);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002943}
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09002944
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002945/*
2946 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2947 */
2948void
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002949trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)52ffabe32016-11-23 20:28:38 -05002950 struct ring_buffer_event *event)
2951{
2952 __buffer_unlock_commit(buffer, event);
2953}
2954
Ingo Molnare309b412008-05-12 21:20:51 +02002955void
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002956trace_function(struct trace_array *tr, unsigned long ip, unsigned long
2957 parent_ip, unsigned int trace_ctx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002958{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002959 struct trace_event_call *call = &event_function;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002960 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002961 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04002962 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002963
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05002964 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002965 trace_ctx);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002966 if (!event)
2967 return;
2968 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04002969 entry->ip = ip;
2970 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05002971
Chunyan Zhang478409d2016-11-21 15:57:18 +08002972 if (!call_filter_check_discard(call, entry, buffer, event)) {
Tingwei Zhang8438f522020-10-05 10:13:13 +03002973 if (static_branch_unlikely(&trace_function_exports_enabled))
2974 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002975 __buffer_unlock_commit(buffer, event);
Chunyan Zhang478409d2016-11-21 15:57:18 +08002976 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002977}
2978
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002979#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002980
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002981/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2982#define FTRACE_KSTACK_NESTING 4
2983
2984#define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2985
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002986struct ftrace_stack {
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002987 unsigned long calls[FTRACE_KSTACK_ENTRIES];
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002988};
2989
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02002990
2991struct ftrace_stacks {
2992 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2993};
2994
2995static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002996static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2997
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05002998static void __ftrace_trace_stack(struct trace_buffer *buffer,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01002999 unsigned int trace_ctx,
3000 int skip, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02003001{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003002 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003003 struct ring_buffer_event *event;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02003004 unsigned int size, nr_entries;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02003005 struct ftrace_stack *fstack;
Steven Rostedt777e2082008-09-29 23:02:42 -04003006 struct stack_entry *entry;
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02003007 int stackidx;
Ingo Molnar86387f72008-05-12 21:20:51 +02003008
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003009 /*
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05003010 * Add one, for this function and the call to stack_trace_save().
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04003011 * If regs is set, then these functions will not be in the way.
3012 */
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05003013#ifndef CONFIG_UNWINDER_ORC
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04003014 if (!regs)
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02003015 skip++;
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05003016#endif
Steven Rostedt (Red Hat)be54f692016-06-23 14:03:47 -04003017
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003018 preempt_disable_notrace();
3019
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02003020 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3021
3022 /* This should never happen. If it does, yell once and skip */
Qiujun Huang906695e2020-10-31 16:57:14 +08003023 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02003024 goto out;
3025
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003026 /*
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02003027 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3028 * interrupt will either see the value pre increment or post
3029 * increment. If the interrupt happens pre increment it will have
3030 * restored the counter when it returns. We just need a barrier to
3031 * keep gcc from moving things around.
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003032 */
3033 barrier();
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003034
Thomas Gleixner2a820bf2019-04-25 11:45:14 +02003035 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02003036 size = ARRAY_SIZE(fstack->calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003037
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02003038 if (regs) {
3039 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3040 size, skip);
3041 } else {
3042 nr_entries = stack_trace_save(fstack->calls, size, skip);
3043 }
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003044
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02003045 size = nr_entries * sizeof(unsigned long);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003046 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
Steven Rostedt (VMware)9deb1932021-04-01 13:54:40 -04003047 (sizeof(*entry) - sizeof(entry->caller)) + size,
3048 trace_ctx);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003049 if (!event)
3050 goto out;
3051 entry = ring_buffer_event_data(event);
3052
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02003053 memcpy(&entry->caller, fstack->calls, size);
3054 entry->size = nr_entries;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003055
Tom Zanussif306cc82013-10-24 08:34:17 -05003056 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003057 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003058
3059 out:
3060 /* Again, don't let gcc optimize things here */
3061 barrier();
Shan Wei82146522012-11-19 13:21:01 +08003062 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003063 preempt_enable_notrace();
3064
Ingo Molnarf0a920d2008-05-12 21:20:47 +02003065}
3066
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003067static inline void ftrace_trace_stack(struct trace_array *tr,
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003068 struct trace_buffer *buffer,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003069 unsigned int trace_ctx,
3070 int skip, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05003071{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04003072 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05003073 return;
3074
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003075 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05003076}
3077
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003078void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3079 int skip)
Steven Rostedt38697052008-10-01 13:14:09 -04003080{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003081 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04003082
3083 if (rcu_is_watching()) {
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003084 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04003085 return;
3086 }
3087
3088 /*
3089 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3090 * but if the above rcu_is_watching() failed, then the NMI
3091 * triggered someplace critical, and rcu_irq_enter() should
3092 * not be called from NMI.
3093 */
3094 if (unlikely(in_nmi()))
3095 return;
3096
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04003097 rcu_irq_enter_irqson();
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003098 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
Steven Rostedt (VMware)a33d7d92017-05-12 13:15:45 -04003099 rcu_irq_exit_irqson();
Steven Rostedt38697052008-10-01 13:14:09 -04003100}
3101
Steven Rostedt03889382009-12-11 09:48:22 -05003102/**
3103 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003104 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05003105 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04003106void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05003107{
Steven Rostedt03889382009-12-11 09:48:22 -05003108 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05003109 return;
Steven Rostedt03889382009-12-11 09:48:22 -05003110
Steven Rostedt (VMware)2ee5b922018-01-23 13:25:04 -05003111#ifndef CONFIG_UNWINDER_ORC
3112 /* Skip 1 to skip this function. */
3113 skip++;
3114#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003115 __ftrace_trace_stack(global_trace.array_buffer.buffer,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003116 tracing_gen_ctx(), skip, NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05003117}
Nikolay Borisovda387e52018-10-17 09:51:43 +03003118EXPORT_SYMBOL_GPL(trace_dump_stack);
Steven Rostedt03889382009-12-11 09:48:22 -05003119
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003120#ifdef CONFIG_USER_STACKTRACE_SUPPORT
Steven Rostedt91e86e52010-11-10 12:56:12 +01003121static DEFINE_PER_CPU(int, user_stack_count);
3122
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003123static void
Steven Rostedt (VMware)bcee5272020-12-04 16:36:16 -05003124ftrace_trace_userstack(struct trace_array *tr,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003125 struct trace_buffer *buffer, unsigned int trace_ctx)
Török Edwin02b67512008-11-22 13:28:47 +02003126{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003127 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02003128 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02003129 struct userstack_entry *entry;
Török Edwin02b67512008-11-22 13:28:47 +02003130
Steven Rostedt (VMware)bcee5272020-12-04 16:36:16 -05003131 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02003132 return;
3133
Steven Rostedtb6345872010-03-12 20:03:30 -05003134 /*
 3135 * NMIs cannot handle page faults, even with fixups.
 3136 * Saving the user stack can (and often does) fault.
3137 */
3138 if (unlikely(in_nmi()))
3139 return;
3140
Steven Rostedt91e86e52010-11-10 12:56:12 +01003141 /*
3142 * prevent recursion, since the user stack tracing may
3143 * trigger other kernel events.
3144 */
3145 preempt_disable();
3146 if (__this_cpu_read(user_stack_count))
3147 goto out;
3148
3149 __this_cpu_inc(user_stack_count);
3150
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003151 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003152 sizeof(*entry), trace_ctx);
Török Edwin02b67512008-11-22 13:28:47 +02003153 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08003154 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02003155 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02003156
Steven Rostedt48659d32009-09-11 11:36:23 -04003157 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02003158 memset(&entry->caller, 0, sizeof(entry->caller));
3159
Thomas Gleixneree6dd0d2019-04-25 11:45:16 +02003160 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
Tom Zanussif306cc82013-10-24 08:34:17 -05003161 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003162 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01003163
Li Zefan1dbd1952010-12-09 15:47:56 +08003164 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01003165 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01003166 out:
3167 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02003168}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003169#else /* CONFIG_USER_STACKTRACE_SUPPORT */
Steven Rostedt (VMware)bcee5272020-12-04 16:36:16 -05003170static void ftrace_trace_userstack(struct trace_array *tr,
3171 struct trace_buffer *buffer,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003172 unsigned int trace_ctx)
Török Edwin02b67512008-11-22 13:28:47 +02003173{
Török Edwin02b67512008-11-22 13:28:47 +02003174}
Thomas Gleixnerc438f142019-04-25 11:45:15 +02003175#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
Török Edwin02b67512008-11-22 13:28:47 +02003176
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02003177#endif /* CONFIG_STACKTRACE */
3178
Yordan Karadzhov (VMware)c6587972021-04-15 21:18:52 +03003179static inline void
3180func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3181 unsigned long long delta)
3182{
3183 entry->bottom_delta_ts = delta & U32_MAX;
3184 entry->top_delta_ts = (delta >> 32);
3185}
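/*
 * Illustrative sketch (not part of this file): the 64-bit delta split
 * above can be reassembled from the two 32-bit halves.  The helper name
 * is hypothetical.
 */
static inline u64 example_func_repeats_delta_ts(struct func_repeats_entry *entry)
{
	return ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
}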
3186
3187void trace_last_func_repeats(struct trace_array *tr,
3188 struct trace_func_repeats *last_info,
3189 unsigned int trace_ctx)
3190{
3191 struct trace_buffer *buffer = tr->array_buffer.buffer;
3192 struct func_repeats_entry *entry;
3193 struct ring_buffer_event *event;
3194 u64 delta;
3195
3196 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3197 sizeof(*entry), trace_ctx);
3198 if (!event)
3199 return;
3200
3201 delta = ring_buffer_event_time_stamp(buffer, event) -
3202 last_info->ts_last_call;
3203
3204 entry = ring_buffer_event_data(event);
3205 entry->ip = last_info->ip;
3206 entry->parent_ip = last_info->parent_ip;
3207 entry->count = last_info->count;
3208 func_repeats_set_delta_ts(entry, delta);
3209
3210 __buffer_unlock_commit(buffer, event);
3211}
3212
Steven Rostedt07d777f2011-09-22 14:01:55 -04003213/* created for use with alloc_percpu */
3214struct trace_buffer_struct {
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003215 int nesting;
3216 char buffer[4][TRACE_BUF_SIZE];
Steven Rostedt07d777f2011-09-22 14:01:55 -04003217};
3218
Naveen N. Raof28439d2021-12-23 16:04:39 +05303219static struct trace_buffer_struct __percpu *trace_percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003220
3221/*
Qiujun Huang2b5894c2020-10-29 23:05:54 +08003222 * This allows for lockless recording. If we're nested too deeply, then
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003223 * this returns NULL.
Steven Rostedt07d777f2011-09-22 14:01:55 -04003224 */
3225static char *get_trace_buf(void)
3226{
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003227 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
Steven Rostedt07d777f2011-09-22 14:01:55 -04003228
Naveen N. Rao823e6702021-12-23 16:04:38 +05303229 if (!trace_percpu_buffer || buffer->nesting >= 4)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003230 return NULL;
3231
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04003232 buffer->nesting++;
3233
3234 /* Interrupts must see nesting incremented before we use the buffer */
3235 barrier();
Qiujun Huangc1acb4a2020-10-30 00:19:05 +08003236 return &buffer->buffer[buffer->nesting - 1][0];
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003237}
3238
3239static void put_trace_buf(void)
3240{
Steven Rostedt (VMware)3d9622c2017-09-05 11:32:01 -04003241 /* Don't let the decrement of nesting leak before this */
3242 barrier();
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003243 this_cpu_dec(trace_percpu_buffer->nesting);
Steven Rostedt07d777f2011-09-22 14:01:55 -04003244}
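/*
 * Illustrative sketch (not part of this file): callers pair
 * get_trace_buf()/put_trace_buf() with preemption disabled, as the
 * vprintk paths below do.  The function name is hypothetical.
 */
static int example_use_trace_buf(const char *fmt, va_list args)
{
	char *tbuffer;
	int len = 0;

	preempt_disable_notrace();
	tbuffer = get_trace_buf();
	if (tbuffer) {
		len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
		/* ... copy tbuffer into a ring buffer event here ... */
		put_trace_buf();
	}
	preempt_enable_notrace();
	return len;
}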
3245
3246static int alloc_percpu_trace_buffer(void)
3247{
Naveen N. Raof28439d2021-12-23 16:04:39 +05303248 struct trace_buffer_struct __percpu *buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003249
Steven Rostedt (VMware)38ce2a92020-08-06 12:46:49 -04003250 if (trace_percpu_buffer)
3251 return 0;
3252
Steven Rostedt07d777f2011-09-22 14:01:55 -04003253 buffers = alloc_percpu(struct trace_buffer_struct);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05003254 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003255 return -ENOMEM;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003256
3257 trace_percpu_buffer = buffers;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003258 return 0;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003259}
3260
Steven Rostedt81698832012-10-11 10:15:05 -04003261static int buffers_allocated;
3262
Steven Rostedt07d777f2011-09-22 14:01:55 -04003263void trace_printk_init_buffers(void)
3264{
Steven Rostedt07d777f2011-09-22 14:01:55 -04003265 if (buffers_allocated)
3266 return;
3267
3268 if (alloc_percpu_trace_buffer())
3269 return;
3270
Steven Rostedt2184db42014-05-28 13:14:40 -04003271 /* trace_printk() is for debug use only. Don't use it in production. */
3272
Joe Perchesa395d6a2016-03-22 14:28:09 -07003273 pr_warn("\n");
3274 pr_warn("**********************************************************\n");
3275 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3276 pr_warn("** **\n");
3277 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3278 pr_warn("** **\n");
3279 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3280 pr_warn("** unsafe for production use. **\n");
3281 pr_warn("** **\n");
3282 pr_warn("** If you see this message and you are not debugging **\n");
3283 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3284 pr_warn("** **\n");
3285 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3286 pr_warn("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04003287
Steven Rostedtb382ede62012-10-10 21:44:34 -04003288 /* Expand the buffers to set size */
3289 tracing_update_buffers();
3290
Steven Rostedt07d777f2011-09-22 14:01:55 -04003291 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04003292
3293 /*
3294 * trace_printk_init_buffers() can be called by modules.
3295 * If that happens, then we need to start cmdline recording
3296 * directly here. If the global_trace.buffer is already
3297 * allocated here, then this was called by module code.
3298 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003299 if (global_trace.array_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04003300 tracing_start_cmdline_record();
3301}
Divya Indif45d1222019-03-20 11:28:51 -07003302EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
Steven Rostedt81698832012-10-11 10:15:05 -04003303
3304void trace_printk_start_comm(void)
3305{
3306 /* Start tracing comms if trace printk is set */
3307 if (!buffers_allocated)
3308 return;
3309 tracing_start_cmdline_record();
3310}
3311
3312static void trace_printk_start_stop_comm(int enabled)
3313{
3314 if (!buffers_allocated)
3315 return;
3316
3317 if (enabled)
3318 tracing_start_cmdline_record();
3319 else
3320 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003321}
3322
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003323/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003324 * trace_vbprintk - write binary msg to tracing buffer
Jakub Kicinskic68c9ec2019-08-27 22:25:47 -07003325 * @ip: The address of the caller
3326 * @fmt: The string format to write to the buffer
3327 * @args: Arguments for @fmt
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003328 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04003329int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003330{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003331 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003332 struct ring_buffer_event *event;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003333 struct trace_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003334 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003335 struct bprint_entry *entry;
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003336 unsigned int trace_ctx;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003337 char *tbuffer;
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003338 int len = 0, size;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003339
3340 if (unlikely(tracing_selftest_running || tracing_disabled))
3341 return 0;
3342
3343 /* Don't pollute graph traces with trace_vprintk internals */
3344 pause_graph_tracing();
3345
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003346 trace_ctx = tracing_gen_ctx();
Steven Rostedt5168ae52010-06-03 09:36:50 -04003347 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003348
Steven Rostedt07d777f2011-09-22 14:01:55 -04003349 tbuffer = get_trace_buf();
3350 if (!tbuffer) {
3351 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003352 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003353 }
3354
3355 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3356
3357 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Steven Rostedt (VMware)34423f22020-01-22 06:44:50 -05003358 goto out_put;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003359
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003360 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003361 buffer = tr->array_buffer.buffer;
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003362 ring_buffer_nest_start(buffer);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003363 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003364 trace_ctx);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003365 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003366 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003367 entry = ring_buffer_event_data(event);
3368 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003369 entry->fmt = fmt;
3370
Steven Rostedt07d777f2011-09-22 14:01:55 -04003371 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05003372 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003373 __buffer_unlock_commit(buffer, event);
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003374 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003375 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003376
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003377out:
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003378 ring_buffer_nest_end(buffer);
Steven Rostedt (VMware)34423f22020-01-22 06:44:50 -05003379out_put:
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003380 put_trace_buf();
3381
3382out_nobuffer:
Steven Rostedt5168ae52010-06-03 09:36:50 -04003383 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003384 unpause_graph_tracing();
3385
3386 return len;
3387}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003388EXPORT_SYMBOL_GPL(trace_vbprintk);
3389
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003390__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003391static int
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003392__trace_array_vprintk(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003393 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003394{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04003395 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003396 struct ring_buffer_event *event;
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003397 int len = 0, size;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003398 struct print_entry *entry;
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003399 unsigned int trace_ctx;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003400 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003401
3402 if (tracing_disabled || tracing_selftest_running)
3403 return 0;
3404
Steven Rostedt07d777f2011-09-22 14:01:55 -04003405 /* Don't pollute graph traces with trace_vprintk internals */
3406 pause_graph_tracing();
3407
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003408 trace_ctx = tracing_gen_ctx();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003409 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003410
Steven Rostedt07d777f2011-09-22 14:01:55 -04003411
3412 tbuffer = get_trace_buf();
3413 if (!tbuffer) {
3414 len = 0;
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003415 goto out_nobuffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04003416 }
3417
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003418 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003419
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003420 size = sizeof(*entry) + len + 1;
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003421 ring_buffer_nest_start(buffer);
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05003422 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003423 trace_ctx);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003424 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04003425 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003426 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01003427 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003428
Dan Carpenter3558a5a2014-11-27 18:57:52 +03003429 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05003430 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04003431 __buffer_unlock_commit(buffer, event);
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01003432 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05003433 }
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003434
3435out:
Steven Rostedt (VMware)82d1b812020-01-16 08:20:18 -05003436 ring_buffer_nest_end(buffer);
Andy Lutomirskie2ace002016-05-26 12:00:33 -07003437 put_trace_buf();
3438
3439out_nobuffer:
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003440 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04003441 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01003442
3443 return len;
3444}
Steven Rostedt659372d2009-09-03 19:11:07 -04003445
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003446__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003447int trace_array_vprintk(struct trace_array *tr,
3448 unsigned long ip, const char *fmt, va_list args)
3449{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003450 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003451}
3452
Steven Rostedt (VMware)38ce2a92020-08-06 12:46:49 -04003453/**
3454 * trace_array_printk - Print a message to a specific instance
3455 * @tr: The instance trace_array descriptor
3456 * @ip: The instruction pointer that this is called from.
3457 * @fmt: The format to print (printf format)
3458 *
 3459 * If a subsystem sets up its own instance, it has the right to
3460 * printk strings into their tracing instance buffer using this
3461 * function. Note, this function will not write into the top level
3462 * buffer (use trace_printk() for that), as writing into the top level
3463 * buffer should only have events that can be individually disabled.
 3464 * trace_printk() is only used for debugging a kernel, and should never
Ingo Molnarf2cc0202021-03-23 18:49:35 +01003465 * be incorporated into normal use.
Steven Rostedt (VMware)38ce2a92020-08-06 12:46:49 -04003466 *
3467 * trace_array_printk() can be used, as it will not add noise to the
3468 * top level tracing buffer.
3469 *
3470 * Note, trace_array_init_printk() must be called on @tr before this
3471 * can be used.
3472 */
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003473__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003474int trace_array_printk(struct trace_array *tr,
3475 unsigned long ip, const char *fmt, ...)
3476{
3477 int ret;
3478 va_list ap;
3479
Divya Indi953ae452019-08-14 10:55:25 -07003480 if (!tr)
3481 return -ENOENT;
3482
Steven Rostedt (VMware)c791cc42020-06-16 14:53:55 -04003483 /* This is only allowed for created instances */
3484 if (tr == &global_trace)
3485 return 0;
3486
3487 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3488 return 0;
3489
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003490 va_start(ap, fmt);
3491 ret = trace_array_vprintk(tr, ip, fmt, ap);
3492 va_end(ap);
3493 return ret;
3494}
Divya Indif45d1222019-03-20 11:28:51 -07003495EXPORT_SYMBOL_GPL(trace_array_printk);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003496
Steven Rostedt (VMware)38ce2a92020-08-06 12:46:49 -04003497/**
3498 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3499 * @tr: The trace array to initialize the buffers for
3500 *
3501 * As trace_array_printk() only writes into instances, they are OK to
3502 * have in the kernel (unlike trace_printk()). This needs to be called
3503 * before trace_array_printk() can be used on a trace_array.
3504 */
3505int trace_array_init_printk(struct trace_array *tr)
3506{
3507 if (!tr)
3508 return -ENOENT;
3509
3510 /* This is only allowed for created instances */
3511 if (tr == &global_trace)
3512 return -EINVAL;
3513
3514 return alloc_percpu_trace_buffer();
3515}
3516EXPORT_SYMBOL_GPL(trace_array_init_printk);
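/*
 * Illustrative sketch (not part of this file): a module that owns a trace
 * instance initializes the printk buffers once and then writes into its
 * own buffer.  Obtaining @tr (for example from trace_array_get_by_name())
 * is left out; the helper is hypothetical.
 */
static int example_instance_printk(struct trace_array *tr)
{
	int ret;

	ret = trace_array_init_printk(tr);
	if (ret)
		return ret;

	return trace_array_printk(tr, _THIS_IP_, "instance event: %d\n", 42);
}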
3517
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003518__printf(3, 4)
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003519int trace_array_printk_buf(struct trace_buffer *buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003520 unsigned long ip, const char *fmt, ...)
3521{
3522 int ret;
3523 va_list ap;
3524
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003525 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003526 return 0;
3527
3528 va_start(ap, fmt);
3529 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3530 va_end(ap);
3531 return ret;
3532}
3533
Mathieu Malaterre26b68dd2018-03-08 21:58:43 +01003534__printf(2, 0)
Steven Rostedt659372d2009-09-03 19:11:07 -04003535int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3536{
Steven Rostedta813a152009-10-09 01:41:35 -04003537 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04003538}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01003539EXPORT_SYMBOL_GPL(trace_vprintk);
3540
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003541static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04003542{
Steven Rostedt6d158a82012-06-27 20:46:14 -04003543 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3544
Steven Rostedt5a90f572008-09-03 17:42:51 -04003545 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003546 if (buf_iter)
Steven Rostedt (VMware)bc1a72a2020-03-17 17:32:25 -04003547 ring_buffer_iter_advance(buf_iter);
Steven Rostedt5a90f572008-09-03 17:42:51 -04003548}
3549
Ingo Molnare309b412008-05-12 21:20:51 +02003550static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003551peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3552 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003553{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003554 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04003555 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003556
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04003557 if (buf_iter) {
Steven Rostedtd7690412008-10-01 00:29:53 -04003558 event = ring_buffer_iter_peek(buf_iter, ts);
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04003559 if (lost_events)
3560 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3561 (unsigned long)-1 : 0;
3562 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003563 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003564 lost_events);
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04003565 }
Steven Rostedtd7690412008-10-01 00:29:53 -04003566
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04003567 if (event) {
3568 iter->ent_size = ring_buffer_event_length(event);
3569 return ring_buffer_event_data(event);
3570 }
3571 iter->ent_size = 0;
3572 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003573}
Steven Rostedtd7690412008-10-01 00:29:53 -04003574
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003575static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04003576__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3577 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003578{
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05003579 struct trace_buffer *buffer = iter->array_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003580 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08003581 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003582 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003583 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003584 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003585 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003586 int cpu;
3587
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003588 /*
3589 * If we are in a per_cpu trace file, don't bother by iterating over
3590 * all cpu and peek directly.
3591 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05003592 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003593 if (ring_buffer_empty_cpu(buffer, cpu_file))
3594 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003595 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003596 if (ent_cpu)
3597 *ent_cpu = cpu_file;
3598
3599 return ent;
3600 }
3601
Steven Rostedtab464282008-05-12 21:21:00 +02003602 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003603
3604 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003605 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003606
Steven Rostedtbc21b472010-03-31 19:49:26 -04003607 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003608
Ingo Molnarcdd31cd22008-05-12 21:20:46 +02003609 /*
3610 * Pick the entry with the smallest timestamp:
3611 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003612 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003613 next = ent;
3614 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003615 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04003616 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04003617 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003618 }
3619 }
3620
Steven Rostedt12b5da32012-03-27 10:43:28 -04003621 iter->ent_size = next_size;
3622
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003623 if (ent_cpu)
3624 *ent_cpu = next_cpu;
3625
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003626 if (ent_ts)
3627 *ent_ts = next_ts;
3628
Steven Rostedtbc21b472010-03-31 19:49:26 -04003629 if (missing_events)
3630 *missing_events = next_lost;
3631
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003632 return next;
3633}
3634
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09003635#define STATIC_FMT_BUF_SIZE 128
3636static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3637
3638static char *trace_iter_expand_format(struct trace_iterator *iter)
3639{
3640 char *tmp;
3641
Steven Rostedt (VMware)0e1e71d2021-04-19 14:23:12 -04003642 /*
 3643 * iter->tr is NULL when used with tp_printk, which means this
 3644 * can get called where it is not safe to call krealloc().
3645 */
3646 if (!iter->tr || iter->fmt == static_fmt_buf)
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09003647 return NULL;
3648
3649 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3650 GFP_KERNEL);
3651 if (tmp) {
3652 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3653 iter->fmt = tmp;
3654 }
3655
3656 return tmp;
3657}
3658
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05003659/* Returns true if the string is safe to dereference from an event */
3660static bool trace_safe_str(struct trace_iterator *iter, const char *str)
3661{
3662 unsigned long addr = (unsigned long)str;
3663 struct trace_event *trace_event;
3664 struct trace_event_call *event;
3665
3666 /* OK if part of the event data */
3667 if ((addr >= (unsigned long)iter->ent) &&
3668 (addr < (unsigned long)iter->ent + iter->ent_size))
3669 return true;
3670
3671 /* OK if part of the temp seq buffer */
3672 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3673 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3674 return true;
3675
3676 /* Core rodata can not be freed */
3677 if (is_kernel_rodata(addr))
3678 return true;
3679
3680 if (trace_is_tracepoint_string(str))
3681 return true;
3682
3683 /*
3684 * Now this could be a module event, referencing core module
3685 * data, which is OK.
3686 */
3687 if (!iter->ent)
3688 return false;
3689
3690 trace_event = ftrace_find_event(iter->ent->type);
3691 if (!trace_event)
3692 return false;
3693
3694 event = container_of(trace_event, struct trace_event_call, event);
Steven Rostedt (VMware)1d185382021-08-16 23:42:57 -04003695 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05003696 return false;
3697
3698 /* Would rather have rodata, but this will suffice */
Steven Rostedt (VMware)1d185382021-08-16 23:42:57 -04003699 if (within_module_core(addr, event->module))
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05003700 return true;
3701
3702 return false;
3703}
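
/*
 * The failure mode the check above guards against looks roughly like
 * this (hypothetical event, shown only for illustration):
 *
 *	TP_fast_assign(__entry->name = dev->name;)	// pointer saved
 *	TP_printk("%s", __entry->name)			// dereferenced later
 *
 * where the string the pointer refers to may be gone by the time the
 * trace is read.  The __string()/__assign_str() helpers copy the
 * characters into the event itself, which makes the "part of the event
 * data" test above succeed.
 */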
3704
3705static const char *show_buffer(struct trace_seq *s)
3706{
3707 struct seq_buf *seq = &s->seq;
3708
3709 seq_buf_terminate(seq);
3710
3711 return seq->buffer;
3712}
3713
3714static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3715
3716static int test_can_verify_check(const char *fmt, ...)
3717{
3718 char buf[16];
3719 va_list ap;
3720 int ret;
3721
3722 /*
3723	 * The verifier depends on vsnprintf() modifying the va_list that is
3724	 * passed to it, i.e. on the va_list being passed by reference. Some
3725	 * architectures (like x86_32) pass it by value, in which case
3726	 * vsnprintf() does not modify the caller's va_list, and the verifier
3727	 * would then need to understand every value that vsnprintf() could
3728	 * have consumed. If the va_list is passed by value, the verifier
3729	 * is simply disabled.
3730 */
3731 va_start(ap, fmt);
3732 vsnprintf(buf, 16, "%d", ap);
3733 ret = va_arg(ap, int);
3734 va_end(ap);
3735
3736 return ret;
3737}
3738
3739static void test_can_verify(void)
3740{
3741 if (!test_can_verify_check("%d %d", 0, 1)) {
3742 pr_info("trace event string verifier disabled\n");
3743 static_branch_inc(&trace_no_verify);
3744 }
3745}
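
/*
 * Step by step, the probe above does:
 *
 *	test_can_verify_check("%d %d", 0, 1);
 *		vsnprintf(buf, 16, "%d", ap);	// consumes the first argument (0)
 *		ret = va_arg(ap, int);		// reads the next argument
 *
 * If va_list is passed by reference, vsnprintf() advanced ap and
 * va_arg() returns 1, so the verifier stays enabled.  If va_list is
 * passed by value, ap was not advanced, the 0 is read again and the
 * trace_no_verify static branch is flipped on.
 */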
3746
3747/**
3748 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3749 * @iter: The iterator that holds the seq buffer and the event being printed
3750 * @fmt: The format used to print the event
3751 * @ap: The va_list holding the data to print from @fmt.
3752 *
3753 * This writes the data into the @iter->seq buffer using the data from
3754 * @fmt and @ap. If the format has a %s, then the source of the string
3755 * is examined to make sure it is safe to print, otherwise it will
3756 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string
3757 * pointer.
3758 */
3759void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3760 va_list ap)
3761{
3762 const char *p = fmt;
3763 const char *str;
3764 int i, j;
3765
3766 if (WARN_ON_ONCE(!fmt))
3767 return;
3768
3769 if (static_branch_unlikely(&trace_no_verify))
3770 goto print;
3771
3772 /* Don't bother checking when doing a ftrace_dump() */
3773 if (iter->fmt == static_fmt_buf)
3774 goto print;
3775
3776 while (*p) {
Steven Rostedt (VMware)eb01f532021-05-13 12:23:24 -04003777 bool star = false;
3778 int len = 0;
3779
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05003780 j = 0;
3781
3782 /* We only care about %s and variants */
3783 for (i = 0; p[i]; i++) {
3784 if (i + 1 >= iter->fmt_size) {
3785 /*
3786 * If we can't expand the copy buffer,
3787 * just print it.
3788 */
3789 if (!trace_iter_expand_format(iter))
3790 goto print;
3791 }
3792
3793 if (p[i] == '\\' && p[i+1]) {
3794 i++;
3795 continue;
3796 }
3797 if (p[i] == '%') {
3798 /* Need to test cases like %08.*s */
3799 for (j = 1; p[i+j]; j++) {
3800 if (isdigit(p[i+j]) ||
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05003801 p[i+j] == '.')
3802 continue;
Steven Rostedt (VMware)eb01f532021-05-13 12:23:24 -04003803 if (p[i+j] == '*') {
3804 star = true;
3805 continue;
3806 }
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05003807 break;
3808 }
3809 if (p[i+j] == 's')
3810 break;
Steven Rostedt (VMware)eb01f532021-05-13 12:23:24 -04003811 star = false;
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05003812 }
3813 j = 0;
3814 }
3815 /* If no %s found then just print normally */
3816 if (!p[i])
3817 break;
3818
3819 /* Copy up to the %s, and print that */
3820 strncpy(iter->fmt, p, i);
3821 iter->fmt[i] = '\0';
3822 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3823
Nikita Yushchenko2ef75e92021-11-18 17:55:16 +03003824 /*
3825 * If iter->seq is full, the above call no longer guarantees
3826 * that ap is in sync with fmt processing, and further calls
3827 * to va_arg() can return wrong positional arguments.
3828 *
3829 * Ensure that ap is no longer used in this case.
3830 */
3831 if (iter->seq.full) {
3832 p = "";
3833 break;
3834 }
3835
Steven Rostedt (VMware)eb01f532021-05-13 12:23:24 -04003836 if (star)
3837 len = va_arg(ap, int);
3838
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05003839 /* The ap now points to the string data of the %s */
3840 str = va_arg(ap, const char *);
3841
3842 /*
3843 * If you hit this warning, it is likely that the
3844 * trace event in question used %s on a string that
3845 * was saved at the time of the event, but may not be
3846 * around when the trace is read. Use __string(),
3847 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3848 * instead. See samples/trace_events/trace-events-sample.h
3849 * for reference.
3850 */
3851 if (WARN_ONCE(!trace_safe_str(iter, str),
3852 "fmt: '%s' current_buffer: '%s'",
3853 fmt, show_buffer(&iter->seq))) {
3854 int ret;
3855
3856 /* Try to safely read the string */
Steven Rostedt (VMware)eb01f532021-05-13 12:23:24 -04003857 if (star) {
3858 if (len + 1 > iter->fmt_size)
3859 len = iter->fmt_size - 1;
3860 if (len < 0)
3861 len = 0;
3862 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3863 iter->fmt[len] = 0;
3864 star = false;
3865 } else {
3866 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3867 iter->fmt_size);
3868 }
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05003869 if (ret < 0)
3870 trace_seq_printf(&iter->seq, "(0x%px)", str);
3871 else
3872 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3873 str, iter->fmt);
3874 str = "[UNSAFE-MEMORY]";
3875 strcpy(iter->fmt, "%s");
3876 } else {
3877 strncpy(iter->fmt, p + i, j + 1);
3878 iter->fmt[j+1] = '\0';
3879 }
Steven Rostedt (VMware)eb01f532021-05-13 12:23:24 -04003880 if (star)
3881 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3882 else
3883 trace_seq_printf(&iter->seq, iter->fmt, str);
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -05003884
3885 p += i + j + 1;
3886 }
3887 print:
3888 if (*p)
3889 trace_seq_vprintf(&iter->seq, p, ap);
3890}
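
/*
 * Example of the fallback output above (pointer value is made up): for
 * an event format of "dev %s up" with an unsafe string argument, the
 * line comes out roughly as
 *
 *	dev (0xffff888003649000:eth0)[UNSAFE-MEMORY] up
 *
 * i.e. the raw pointer (plus its contents, when they could still be
 * read with the *_from_kernel_nofault() helpers) is printed, and
 * "[UNSAFE-MEMORY]" is substituted for the %s itself, together with a
 * one-time warning naming the offending format.
 */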
3891
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09003892const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3893{
3894 const char *p, *new_fmt;
3895 char *q;
3896
3897 if (WARN_ON_ONCE(!fmt))
3898 return fmt;
3899
Steven Rostedt (VMware)0e1e71d2021-04-19 14:23:12 -04003900 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
Masami Hiramatsua345a672020-10-15 23:55:25 +09003901 return fmt;
3902
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09003903 p = fmt;
3904 new_fmt = q = iter->fmt;
3905 while (*p) {
3906 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3907 if (!trace_iter_expand_format(iter))
3908 return fmt;
3909
3910 q += iter->fmt - new_fmt;
3911 new_fmt = iter->fmt;
3912 }
3913
3914 *q++ = *p++;
3915
3916 /* Replace %p with %px */
3917 if (p[-1] == '%') {
3918 if (p[0] == '%') {
3919 *q++ = *p++;
3920 } else if (p[0] == 'p' && !isalnum(p[1])) {
3921 *q++ = *p++;
3922 *q++ = 'x';
3923 }
3924 }
3925 }
3926 *q = '\0';
3927
3928 return new_fmt;
3929}
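
/*
 * Example of the rewrite above: "comm=%s pid=%d addr=%p" becomes
 * "comm=%s pid=%d addr=%px", while "%%p" (a literal '%' followed by a
 * 'p') and extended specifiers such as "%pS" or "%p4cc" are left
 * alone, since only a bare "%p" not followed by an alphanumeric is
 * converted.  When the hash-ptr trace option is set, the format is
 * returned untouched and pointers stay hashed.
 */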
3930
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003931#define STATIC_TEMP_BUF_SIZE 128
Minchan Kim8fa655a2020-11-25 14:56:54 -08003932static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003933
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003934/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02003935struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3936 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003937{
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003938 /* __find_next_entry will reset ent_size */
3939 int ent_size = iter->ent_size;
3940 struct trace_entry *entry;
3941
3942 /*
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003943 * If called from ftrace_dump(), then the iter->temp buffer
3944 * will be the static_temp_buf and not created from kmalloc.
3945	 * If the entry size is greater than the buffer, we cannot
3946	 * save it. Just return NULL in that case. This is only
3947	 * used to add markers when two consecutive events' time
3948	 * stamps have a large delta. See trace_print_lat_context().
3949 */
3950 if (iter->temp == static_temp_buf &&
3951 STATIC_TEMP_BUF_SIZE < ent_size)
3952 return NULL;
3953
3954 /*
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003955	 * __find_next_entry() may call peek_next_entry(), which in turn may
3956	 * call ring_buffer_peek(), and that can leave the contents of
3957	 * iter->ent undefined. iter->ent therefore needs to be copied now.
3958 */
3959 if (iter->ent && iter->ent != iter->temp) {
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04003960 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3961 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
Steven Rostedt (VMware)851e6f62020-09-29 12:27:23 -04003962 void *temp;
3963 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3964 if (!temp)
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003965 return NULL;
Steven Rostedt (VMware)851e6f62020-09-29 12:27:23 -04003966 kfree(iter->temp);
3967 iter->temp = temp;
3968 iter->temp_size = iter->ent_size;
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003969 }
3970 memcpy(iter->temp, iter->ent, iter->ent_size);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04003971 iter->ent = iter->temp;
3972 }
3973 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3974 /* Put back the original ent_size */
3975 iter->ent_size = ent_size;
3976
3977 return entry;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003978}
Ingo Molnar8c523a92008-05-12 21:20:46 +02003979
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003980/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05003981void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003982{
Steven Rostedtbc21b472010-03-31 19:49:26 -04003983 iter->ent = __find_next_entry(iter, &iter->cpu,
3984 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003985
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003986 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01003987 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04003988
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003989 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003990}
3991
Ingo Molnare309b412008-05-12 21:20:51 +02003992static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02003993{
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05003994 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04003995 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003996}
3997
Ingo Molnare309b412008-05-12 21:20:51 +02003998static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003999{
4000 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004001 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02004002 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004003
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004004 WARN_ON_ONCE(iter->leftover);
4005
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004006 (*pos)++;
4007
4008 /* can't go backwards */
4009 if (iter->idx > i)
4010 return NULL;
4011
4012 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05004013 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004014 else
4015 ent = iter;
4016
4017 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05004018 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004019
4020 iter->pos = *pos;
4021
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004022 return ent;
4023}
4024
Jason Wessel955b61e2010-08-05 09:22:23 -05004025void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004026{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004027 struct ring_buffer_iter *buf_iter;
4028 unsigned long entries = 0;
4029 u64 ts;
4030
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004031 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004032
Steven Rostedt6d158a82012-06-27 20:46:14 -04004033 buf_iter = trace_buffer_iter(iter, cpu);
4034 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004035 return;
4036
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004037 ring_buffer_iter_reset(buf_iter);
4038
4039 /*
4040	 * With the max latency tracers we can have the case that a reset
4041	 * never took place on a cpu. This shows up as timestamps that are
4042	 * before the start of the buffer.
4043 */
YangHui69243722020-06-16 11:36:46 +08004044 while (ring_buffer_iter_peek(buf_iter, &ts)) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004045 if (ts >= iter->array_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004046 break;
4047 entries++;
Steven Rostedt (VMware)bc1a72a2020-03-17 17:32:25 -04004048 ring_buffer_iter_advance(buf_iter);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004049 }
4050
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004051 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004052}
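
/*
 * Entries skipped above (those stamped before array_buffer->time_start,
 * i.e. from before the last reset this buffer saw) are remembered in the
 * per-CPU ->skipped_entries count so that get_total_entries() below can
 * leave them out of the header statistics.
 */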
4053
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004054/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004055 * The current tracer is copied to avoid taking a global lock
4056 * all around.
4057 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004058static void *s_start(struct seq_file *m, loff_t *pos)
4059{
4060 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004061 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004062 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004063 void *p = NULL;
4064 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004065 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004066
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09004067 /*
4068 * copy the tracer to avoid using a global lock all around.
4069	 * iter->trace is a copy of current_trace; the name pointer can be
4070	 * compared directly instead of using strcmp(), as iter->trace->name
4071	 * points to the same string as current_trace->name.
4072 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004073 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004074 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
4075 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004076 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004077
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004078#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004079 if (iter->snapshot && iter->trace->use_max_tr)
4080 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004081#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004082
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004083 if (*pos != iter->pos) {
4084 iter->ent = NULL;
4085 iter->cpu = 0;
4086 iter->idx = -1;
4087
Steven Rostedtae3b5092013-01-23 15:22:59 -05004088 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004089 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004090 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004091 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004092 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004093
Lai Jiangshanac91d852010-03-02 17:54:50 +08004094 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004095 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4096 ;
4097
4098 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004099 /*
4100 * If we overflowed the seq_file before, then we want
4101 * to just reuse the trace_seq buffer again.
4102 */
4103 if (iter->leftover)
4104 p = iter;
4105 else {
4106 l = *pos - 1;
4107 p = s_next(m, p, &l);
4108 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004109 }
4110
Lai Jiangshan4f535962009-05-18 19:35:34 +08004111 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004112 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004113 return p;
4114}
4115
4116static void s_stop(struct seq_file *m, void *p)
4117{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004118 struct trace_iterator *iter = m->private;
4119
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004120#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004121 if (iter->snapshot && iter->trace->use_max_tr)
4122 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004123#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004124
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004125 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004126 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004127}
4128
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004129static void
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004130get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004131 unsigned long *entries, int cpu)
4132{
4133 unsigned long count;
4134
4135 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4136 /*
4137 * If this buffer has skipped entries, then we hold all
4138 * entries for the trace and we need to ignore the
4139 * ones before the time stamp.
4140 */
4141 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4142 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4143 /* total is the same as the entries */
4144 *total = count;
4145 } else
4146 *total = count +
4147 ring_buffer_overrun_cpu(buf->buffer, cpu);
4148 *entries = count;
4149}
4150
4151static void
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004152get_total_entries(struct array_buffer *buf,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004153 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004154{
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004155 unsigned long t, e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004156 int cpu;
4157
4158 *total = 0;
4159 *entries = 0;
4160
4161 for_each_tracing_cpu(cpu) {
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004162 get_total_entries_cpu(buf, &t, &e, cpu);
4163 *total += t;
4164 *entries += e;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004165 }
4166}
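
/*
 * In the helpers above, "entries" is what is currently readable from
 * the buffer, while "total" also counts events that were overwritten
 * when the ring buffer wrapped (ring_buffer_overrun_cpu()).  This is
 * what ends up in the "entries-in-buffer/entries-written" header line
 * printed by print_event_info().
 */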
4167
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004168unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4169{
4170 unsigned long total, entries;
4171
4172 if (!tr)
4173 tr = &global_trace;
4174
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004175 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004176
4177 return entries;
4178}
4179
4180unsigned long trace_total_entries(struct trace_array *tr)
4181{
4182 unsigned long total, entries;
4183
4184 if (!tr)
4185 tr = &global_trace;
4186
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004187 get_total_entries(&tr->array_buffer, &total, &entries);
Douglas Andersonecffc8a2019-03-19 10:12:05 -07004188
4189 return entries;
4190}
4191
Ingo Molnare309b412008-05-12 21:20:51 +02004192static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004193{
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02004194 seq_puts(m, "# _------=> CPU# \n"
Sebastian Andrzej Siewior289e7b02021-12-13 11:08:53 +01004195 "# / _-----=> irqs-off/BH-disabled\n"
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02004196 "# | / _----=> need-resched \n"
4197 "# || / _---=> hardirq/softirq \n"
4198 "# ||| / _--=> preempt-depth \n"
Thomas Gleixner54357f02021-08-10 15:26:25 +02004199 "# |||| / _-=> migrate-disable \n"
4200 "# ||||| / delay \n"
4201 "# cmd pid |||||| time | caller \n"
4202 "# \\ / |||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004203}
4204
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004205static void print_event_info(struct array_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004206{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004207 unsigned long total;
4208 unsigned long entries;
4209
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004210 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004211 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4212 entries, total, num_online_cpus());
4213 seq_puts(m, "#\n");
4214}
4215
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004216static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
Joel Fernandes441dae82017-06-25 22:38:43 -07004217 unsigned int flags)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004218{
Joel Fernandes441dae82017-06-25 22:38:43 -07004219 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4220
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004221 print_event_info(buf, m);
Joel Fernandes441dae82017-06-25 22:38:43 -07004222
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02004223 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4224 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004225}
4226
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004227static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
Joel Fernandes441dae82017-06-25 22:38:43 -07004228 unsigned int flags)
Steven Rostedt77271ce2011-11-17 09:34:33 -05004229{
Joel Fernandes441dae82017-06-25 22:38:43 -07004230 bool tgid = flags & TRACE_ITER_RECORD_TGID;
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02004231 const char *space = " ";
4232 int prec = tgid ? 12 : 2;
Joel Fernandes441dae82017-06-25 22:38:43 -07004233
Quentin Perret9e738212019-02-14 15:29:50 +00004234 print_event_info(buf, m);
4235
Sebastian Andrzej Siewior289e7b02021-12-13 11:08:53 +01004236 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
Sebastian Andrzej Siewior795d6372020-09-04 10:23:31 +02004237 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4238 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4239 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
Thomas Gleixner54357f02021-08-10 15:26:25 +02004240 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4241 seq_printf(m, "# %.*s|||| / delay\n", prec, space);
4242 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4243 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
Steven Rostedt77271ce2011-11-17 09:34:33 -05004244}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004245
Jiri Olsa62b915f2010-04-02 19:01:22 +02004246void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004247print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4248{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004249 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004250 struct array_buffer *buf = iter->array_buffer;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004251 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004252 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05004253 unsigned long entries;
4254 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004255 const char *name = "preemption";
4256
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05004257 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004258
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004259 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004260
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004261 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004262 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004263 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004264 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004265 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004266 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02004267 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004268 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02004269 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004270 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004271#if defined(CONFIG_PREEMPT_NONE)
4272 "server",
4273#elif defined(CONFIG_PREEMPT_VOLUNTARY)
4274 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04004275#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004276 "preempt",
Sebastian Andrzej Siewior9c34fc42019-10-15 21:18:20 +02004277#elif defined(CONFIG_PREEMPT_RT)
4278 "preempt_rt",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004279#else
4280 "unknown",
4281#endif
4282 /* These are reserved for later use */
4283 0, 0, 0, 0);
4284#ifdef CONFIG_SMP
4285 seq_printf(m, " #P:%d)\n", num_online_cpus());
4286#else
4287 seq_puts(m, ")\n");
4288#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004289 seq_puts(m, "# -----------------\n");
4290 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004291 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07004292 data->comm, data->pid,
4293 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004294 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004295 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004296
4297 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004298 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02004299 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4300 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004301 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02004302 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4303 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04004304 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004305 }
4306
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09004307 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004308}
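
/*
 * The banner printed above looks roughly like this (values are made
 * up, from a hypothetical irqsoff trace):
 *
 * # irqsoff latency trace v1.1.5 on 5.16.0
 * # --------------------------------------------------------------
 * # latency: 259 us, #4/4, CPU#2 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
 * #    -----------------
 * #    | task: swapper/2-0 (uid:0 nice:0 policy:0 rt_prio:0)
 * #    -----------------
 */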
4309
Steven Rostedta3097202008-11-07 22:36:02 -05004310static void test_cpu_buff_start(struct trace_iterator *iter)
4311{
4312 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004313 struct trace_array *tr = iter->tr;
Steven Rostedta3097202008-11-07 22:36:02 -05004314
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004315 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004316 return;
4317
4318 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4319 return;
4320
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07004321 if (cpumask_available(iter->started) &&
4322 cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05004323 return;
4324
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004325 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004326 return;
4327
Matthias Kaehlcke4dbbe2d2017-04-21 16:41:10 -07004328 if (cpumask_available(iter->started))
Sasha Levin919cd972015-09-04 12:45:56 -04004329 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004330
4331 /* Don't print started cpu buffer for the first entry of the trace */
4332 if (iter->idx > 1)
4333 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4334 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05004335}
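
/*
 * The annotation emitted above appears in the output as, e.g.
 *
 *	##### CPU 2 buffer started ####
 *
 * and marks the first entry seen from a CPU once the buffers have
 * overflowed (TRACE_FILE_ANNOTATE is only set in that case), so the
 * reader knows earlier events from that CPU were overwritten rather
 * than never recorded.
 */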
4336
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004337static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004338{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004339 struct trace_array *tr = iter->tr;
Steven Rostedt214023c2008-05-12 21:20:46 +02004340 struct trace_seq *s = &iter->seq;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004341 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02004342 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004343 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004344
Ingo Molnar4e3c3332008-05-12 21:20:45 +02004345 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004346
Steven Rostedta3097202008-11-07 22:36:02 -05004347 test_cpu_buff_start(iter);
4348
Steven Rostedtf633cef2008-12-23 23:24:13 -05004349 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004350
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004351 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004352 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4353 trace_print_lat_context(iter);
4354 else
4355 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004356 }
4357
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004358 if (trace_seq_has_overflowed(s))
4359 return TRACE_TYPE_PARTIAL_LINE;
4360
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02004361 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04004362 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02004363
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004364 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04004365
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004366 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004367}
4368
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004369static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004370{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004371 struct trace_array *tr = iter->tr;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004372 struct trace_seq *s = &iter->seq;
4373 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004374 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004375
4376 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004377
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004378 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004379 trace_seq_printf(s, "%d %d %llu ",
4380 entry->pid, iter->cpu, iter->ts);
4381
4382 if (trace_seq_has_overflowed(s))
4383 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004384
Steven Rostedtf633cef2008-12-23 23:24:13 -05004385 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02004386 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04004387 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02004388
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004389 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04004390
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004391 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004392}
4393
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004394static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004395{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004396 struct trace_array *tr = iter->tr;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004397 struct trace_seq *s = &iter->seq;
4398 unsigned char newline = '\n';
4399 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004400 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004401
4402 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004403
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004404 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004405 SEQ_PUT_HEX_FIELD(s, entry->pid);
4406 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4407 SEQ_PUT_HEX_FIELD(s, iter->ts);
4408 if (trace_seq_has_overflowed(s))
4409 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004410 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004411
Steven Rostedtf633cef2008-12-23 23:24:13 -05004412 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02004413 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04004414 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02004415 if (ret != TRACE_TYPE_HANDLED)
4416 return ret;
4417 }
Steven Rostedt7104f302008-10-01 10:52:51 -04004418
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004419 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004420
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004421 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004422}
4423
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004424static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004425{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004426 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004427 struct trace_seq *s = &iter->seq;
4428 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05004429 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004430
4431 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04004432
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004433 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004434 SEQ_PUT_FIELD(s, entry->pid);
4435 SEQ_PUT_FIELD(s, iter->cpu);
4436 SEQ_PUT_FIELD(s, iter->ts);
4437 if (trace_seq_has_overflowed(s))
4438 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02004439 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004440
Steven Rostedtf633cef2008-12-23 23:24:13 -05004441 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04004442 return event ? event->funcs->binary(iter, 0, event) :
4443 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004444}
4445
Jiri Olsa62b915f2010-04-02 19:01:22 +02004446int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004447{
Steven Rostedt6d158a82012-06-27 20:46:14 -04004448 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004449 int cpu;
4450
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004451 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05004452 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004453 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04004454 buf_iter = trace_buffer_iter(iter, cpu);
4455 if (buf_iter) {
4456 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004457 return 0;
4458 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004459 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04004460 return 0;
4461 }
4462 return 1;
4463 }
4464
Steven Rostedtab464282008-05-12 21:21:00 +02004465 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04004466 buf_iter = trace_buffer_iter(iter, cpu);
4467 if (buf_iter) {
4468 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04004469 return 0;
4470 } else {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004471 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04004472 return 0;
4473 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004474 }
Steven Rostedtd7690412008-10-01 00:29:53 -04004475
Frederic Weisbecker797d3712008-09-30 18:13:45 +02004476 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004477}
4478
Lai Jiangshan4f535962009-05-18 19:35:34 +08004479/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05004480enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004481{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004482 struct trace_array *tr = iter->tr;
4483 unsigned long trace_flags = tr->trace_flags;
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004484 enum print_line_t ret;
4485
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004486 if (iter->lost_events) {
Steven Rostedt (VMware)c9b7a4a2020-03-17 17:32:32 -04004487 if (iter->lost_events == (unsigned long)-1)
4488 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4489 iter->cpu);
4490 else
4491 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4492 iter->cpu, iter->lost_events);
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05004493 if (trace_seq_has_overflowed(&iter->seq))
4494 return TRACE_TYPE_PARTIAL_LINE;
4495 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04004496
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004497 if (iter->trace && iter->trace->print_line) {
4498 ret = iter->trace->print_line(iter);
4499 if (ret != TRACE_TYPE_UNHANDLED)
4500 return ret;
4501 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02004502
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05004503 if (iter->ent->type == TRACE_BPUTS &&
4504 trace_flags & TRACE_ITER_PRINTK &&
4505 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4506 return trace_print_bputs_msg_only(iter);
4507
Frederic Weisbecker48ead022009-03-12 18:24:49 +01004508 if (iter->ent->type == TRACE_BPRINT &&
4509 trace_flags & TRACE_ITER_PRINTK &&
4510 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04004511 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01004512
Frederic Weisbecker66896a82008-12-13 20:18:13 +01004513 if (iter->ent->type == TRACE_PRINT &&
4514 trace_flags & TRACE_ITER_PRINTK &&
4515 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04004516 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01004517
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02004518 if (trace_flags & TRACE_ITER_BIN)
4519 return print_bin_fmt(iter);
4520
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02004521 if (trace_flags & TRACE_ITER_HEX)
4522 return print_hex_fmt(iter);
4523
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004524 if (trace_flags & TRACE_ITER_RAW)
4525 return print_raw_fmt(iter);
4526
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004527 return print_trace_fmt(iter);
4528}
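
/*
 * Dispatch order used above: the lost-events notice (if any), then a
 * tracer-specific ->print_line() hook, then the printk-msg-only
 * shortcuts for bputs/bprintk/printk events, then the user-selected
 * bin/hex/raw output modes, and finally the default formatted output
 * from print_trace_fmt().
 */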
4529
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004530void trace_latency_header(struct seq_file *m)
4531{
4532 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004533 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004534
4535 /* print nothing if the buffers are empty */
4536 if (trace_empty(iter))
4537 return;
4538
4539 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4540 print_trace_header(m, iter);
4541
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004542 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01004543 print_lat_help_header(m);
4544}
4545
Jiri Olsa62b915f2010-04-02 19:01:22 +02004546void trace_default_header(struct seq_file *m)
4547{
4548 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004549 struct trace_array *tr = iter->tr;
4550 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02004551
Jiri Olsaf56e7f82011-06-03 16:58:49 +02004552 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4553 return;
4554
Jiri Olsa62b915f2010-04-02 19:01:22 +02004555 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4556 /* print nothing if the buffers are empty */
4557 if (trace_empty(iter))
4558 return;
4559 print_trace_header(m, iter);
4560 if (!(trace_flags & TRACE_ITER_VERBOSE))
4561 print_lat_help_header(m);
4562 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05004563 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4564 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004565 print_func_help_header_irq(iter->array_buffer,
Joel Fernandes441dae82017-06-25 22:38:43 -07004566 m, trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05004567 else
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004568 print_func_help_header(iter->array_buffer, m,
Joel Fernandes441dae82017-06-25 22:38:43 -07004569 trace_flags);
Steven Rostedt77271ce2011-11-17 09:34:33 -05004570 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02004571 }
4572}
4573
Steven Rostedte0a413f2011-09-29 21:26:16 -04004574static void test_ftrace_alive(struct seq_file *m)
4575{
4576 if (!ftrace_is_dead())
4577 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004578 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4579 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04004580}
4581
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004582#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004583static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004584{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004585 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4586 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4587 "# Takes a snapshot of the main buffer.\n"
4588 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4589		    "# (Doesn't have to be '2', works with any number that\n"
4590 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004591}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004592
4593static void show_snapshot_percpu_help(struct seq_file *m)
4594{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004595 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004596#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004597 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4598 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004599#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004600 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4601 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004602#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01004603 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4604		    "# (Doesn't have to be '2', works with any number that\n"
4605 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004606}
4607
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004608static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4609{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004610 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004611 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004612 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004613 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004614
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01004615 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05004616 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4617 show_snapshot_main_help(m);
4618 else
4619 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004620}
4621#else
4622/* Should never be called */
4623static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4624#endif
4625
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004626static int s_show(struct seq_file *m, void *v)
4627{
4628 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004629 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004630
4631 if (iter->ent == NULL) {
4632 if (iter->tr) {
4633 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4634 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04004635 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004636 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05004637 if (iter->snapshot && trace_empty(iter))
4638 print_snapshot_help(m, iter);
4639 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004640 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02004641 else
4642 trace_default_header(m);
4643
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004644 } else if (iter->leftover) {
4645 /*
4646 * If we filled the seq_file buffer earlier, we
4647 * want to just show it now.
4648 */
4649 ret = trace_print_seq(m, &iter->seq);
4650
4651 /* ret should this time be zero, but you never know */
4652 iter->leftover = ret;
4653
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004654 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004655 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05004656 ret = trace_print_seq(m, &iter->seq);
4657 /*
4658 * If we overflow the seq_file buffer, then it will
4659 * ask us for this data again at start up.
4660 * Use that instead.
4661 * ret is 0 if seq_file write succeeded.
4662 * -1 otherwise.
4663 */
4664 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004665 }
4666
4667 return 0;
4668}
4669
Oleg Nesterov649e9c702013-07-23 17:25:54 +02004670/*
4671 * Should be used after trace_array_get(), trace_types_lock
4672 * ensures that i_cdev was already initialized.
4673 */
4674static inline int tracing_get_cpu(struct inode *inode)
4675{
4676 if (inode->i_cdev) /* See trace_create_cpu_file() */
4677 return (long)inode->i_cdev - 1;
4678 return RING_BUFFER_ALL_CPUS;
4679}
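
/*
 * For example, trace_create_cpu_file() (further down in this file)
 * stores cpu + 1 in i_cdev, so a stored value of 3 decodes to cpu 2
 * here, and an inode with no i_cdev set means "all CPUs".
 */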
4680
James Morris88e9d342009-09-22 16:43:43 -07004681static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004682 .start = s_start,
4683 .next = s_next,
4684 .stop = s_stop,
4685 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004686};
4687
Ingo Molnare309b412008-05-12 21:20:51 +02004688static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02004689__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004690{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004691 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004692 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02004693 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004694
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004695 if (tracing_disabled)
4696 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02004697
Jiri Olsa50e18b92012-04-25 10:23:39 +02004698 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004699 if (!iter)
4700 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004701
Gil Fruchter72917232015-06-09 10:32:35 +03004702 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
Steven Rostedt6d158a82012-06-27 20:46:14 -04004703 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004704 if (!iter->buffer_iter)
4705 goto release;
4706
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004707 /*
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004708 * trace_find_next_entry() may need to save off iter->ent.
4709 * It will place it into the iter->temp buffer. As most
4710	 * events are less than 128 bytes, allocate a buffer of that size.
4711 * If one is greater, then trace_find_next_entry() will
4712 * allocate a new buffer to adjust for the bigger iter->ent.
4713 * It's not critical if it fails to get allocated here.
4714 */
4715 iter->temp = kmalloc(128, GFP_KERNEL);
4716 if (iter->temp)
4717 iter->temp_size = 128;
4718
4719 /*
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09004720	 * trace_event_printf() may need to modify the given format
4721	 * string to replace %p with %px so that it shows the real address
4722	 * instead of a hashed value. However, that is only needed for
4723	 * event tracing; other tracers may not need it. Defer the
4724	 * allocation until it is needed.
4725 */
4726 iter->fmt = NULL;
4727 iter->fmt_size = 0;
4728
4729 /*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004730 * We make a copy of the current tracer to avoid concurrent
4731 * changes on it while we are reading.
4732 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004733 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004734 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004735 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004736 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004737
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004738 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004739
Li Zefan79f55992009-06-15 14:58:26 +08004740 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004741 goto fail;
4742
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004743 iter->tr = tr;
4744
4745#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004746 /* Currently only the top directory has a snapshot */
4747 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004748 iter->array_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004749 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004750#endif
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004751 iter->array_buffer = &tr->array_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004752 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004753 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02004754 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004755 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004756
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004757 /* Notify the tracer early; before we stop tracing. */
Dan Carpenterb3f7a6c2014-11-22 21:30:12 +03004758 if (iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01004759 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01004760
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004761 /* Annotate start of buffers if we had overruns */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004762 if (ring_buffer_overruns(iter->array_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05004763 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4764
David Sharp8be07092012-11-13 12:18:22 -08004765 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004766 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004767 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4768
Steven Rostedt (VMware)06e0a542020-03-17 17:32:31 -04004769 /*
4770 * If pause-on-trace is enabled, then stop the trace while
4771 * dumping, unless this is the "snapshot" file
4772 */
4773 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004774 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004775
Steven Rostedtae3b5092013-01-23 15:22:59 -05004776 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004777 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004778 iter->buffer_iter[cpu] =
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004779 ring_buffer_read_prepare(iter->array_buffer->buffer,
Douglas Anderson31b265b2019-03-08 11:32:04 -08004780 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004781 }
4782 ring_buffer_read_prepare_sync();
4783 for_each_tracing_cpu(cpu) {
4784 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004785 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004786 }
4787 } else {
4788 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004789 iter->buffer_iter[cpu] =
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004790 ring_buffer_read_prepare(iter->array_buffer->buffer,
Douglas Anderson31b265b2019-03-08 11:32:04 -08004791 cpu, GFP_KERNEL);
David Miller72c9ddf2010-04-20 15:47:11 -07004792 ring_buffer_read_prepare_sync();
4793 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04004794 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004795 }
4796
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004797 mutex_unlock(&trace_types_lock);
4798
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004799 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004800
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004801 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004802 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004803 kfree(iter->trace);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004804 kfree(iter->temp);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004805 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03004806release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02004807 seq_release_private(inode, file);
4808 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004809}
4810
4811int tracing_open_generic(struct inode *inode, struct file *filp)
4812{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004813 int ret;
4814
4815 ret = tracing_check_open_get_tr(NULL);
4816 if (ret)
4817 return ret;
Steven Rostedt60a11772008-05-12 21:20:44 +02004818
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004819 filp->private_data = inode->i_private;
4820 return 0;
4821}
4822
Geyslan G. Bem2e864212013-10-18 21:15:54 -03004823bool tracing_is_disabled(void)
4824{
4825 return (tracing_disabled) ? true: false;
4826}
4827
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004828/*
4829 * Open and update trace_array ref count.
4830 * Must have the current trace_array passed to it.
4831 */
Steven Rostedt (VMware)aa07d712019-10-11 19:12:21 -04004832int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004833{
4834 struct trace_array *tr = inode->i_private;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004835 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004836
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004837 ret = tracing_check_open_get_tr(tr);
4838 if (ret)
4839 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004840
4841 filp->private_data = inode->i_private;
4842
4843 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004844}
4845
John Keeping2972e302021-12-07 14:25:58 +00004846static int tracing_mark_open(struct inode *inode, struct file *filp)
4847{
4848 stream_open(inode, filp);
4849 return tracing_open_generic_tr(inode, filp);
4850}
4851
Hannes Eder4fd27352009-02-10 19:44:12 +01004852static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004853{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004854 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07004855 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004856 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004857 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004858
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004859 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004860 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004861 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004862 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004863
Oleg Nesterov6484c712013-07-23 17:26:10 +02004864 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004865 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004866 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05004867
Steven Rostedt3928a8a2008-09-29 23:02:41 -04004868 for_each_tracing_cpu(cpu) {
4869 if (iter->buffer_iter[cpu])
4870 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4871 }
4872
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004873 if (iter->trace && iter->trace->close)
4874 iter->trace->close(iter);
4875
Steven Rostedt (VMware)06e0a542020-03-17 17:32:31 -04004876 if (!iter->snapshot && tr->stop_count)
Hiraku Toyookadebdd572012-12-26 11:53:00 +09004877 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004878 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004879
4880 __trace_array_put(tr);
4881
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004882 mutex_unlock(&trace_types_lock);
4883
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004884 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02004885 free_cpumask_var(iter->started);
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09004886 kfree(iter->fmt);
Steven Rostedt (VMware)ff895102020-03-17 17:32:23 -04004887 kfree(iter->temp);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004888 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04004889 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02004890 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004891
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004892 return 0;
4893}
4894
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004895static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4896{
4897 struct trace_array *tr = inode->i_private;
4898
4899 trace_array_put(tr);
4900 return 0;
4901}
4902
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004903static int tracing_single_release_tr(struct inode *inode, struct file *file)
4904{
4905 struct trace_array *tr = inode->i_private;
4906
4907 trace_array_put(tr);
4908
4909 return single_release(inode, file);
4910}
4911
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004912static int tracing_open(struct inode *inode, struct file *file)
4913{
Oleg Nesterov6484c712013-07-23 17:26:10 +02004914 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05004915 struct trace_iterator *iter;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004916 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004917
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04004918 ret = tracing_check_open_get_tr(tr);
4919 if (ret)
4920 return ret;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004921
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004922 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02004923 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4924 int cpu = tracing_get_cpu(inode);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05004925 struct array_buffer *trace_buf = &tr->array_buffer;
Bo Yan8dd33bc2017-09-18 10:03:35 -07004926
4927#ifdef CONFIG_TRACER_MAX_TRACE
4928 if (tr->current_trace->print_max)
4929 trace_buf = &tr->max_buffer;
4930#endif
Oleg Nesterov6484c712013-07-23 17:26:10 +02004931
4932 if (cpu == RING_BUFFER_ALL_CPUS)
Bo Yan8dd33bc2017-09-18 10:03:35 -07004933 tracing_reset_online_cpus(trace_buf);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004934 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04004935 tracing_reset_cpu(trace_buf, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004936 }
4937
4938 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02004939 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004940 if (IS_ERR(iter))
4941 ret = PTR_ERR(iter);
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004942 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt4acd4d02009-03-18 10:40:24 -04004943 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4944 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04004945
4946 if (ret < 0)
4947 trace_array_put(tr);
4948
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004949 return ret;
4950}
4951
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004952/*
4953 * Some tracers are not suitable for instance buffers.
4954 * A tracer is always available for the global array (toplevel)
4955 * or if it explicitly states that it is.
4956 */
4957static bool
4958trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4959{
4960 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4961}
4962
4963/* Find the next tracer that this trace array may use */
4964static struct tracer *
4965get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4966{
4967 while (t && !trace_ok_for_array(t, tr))
4968 t = t->next;
4969
4970 return t;
4971}
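/*
 * Illustrative sketch (not part of the original file): a tracer makes itself
 * selectable from instances/<name>/current_tracer by setting allow_instances;
 * otherwise get_tracer_for_array() above skips it for anything but the
 * global (toplevel) array. The tracer name below is hypothetical.
 */
#if 0
static struct tracer example_tracer __read_mostly = {
	.name			= "example",
	.allow_instances	= true,
};
#endif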
4972
Ingo Molnare309b412008-05-12 21:20:51 +02004973static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004974t_next(struct seq_file *m, void *v, loff_t *pos)
4975{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004976 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004977 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004978
4979 (*pos)++;
4980
4981 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004982 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004983
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004984 return t;
4985}
4986
4987static void *t_start(struct seq_file *m, loff_t *pos)
4988{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004989 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08004990 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004991 loff_t l = 0;
4992
4993 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004994
4995 t = get_tracer_for_array(tr, trace_types);
4996 for (; t && l < *pos; t = t_next(m, t, &l))
4997 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004998
4999 return t;
5000}
5001
5002static void t_stop(struct seq_file *m, void *p)
5003{
5004 mutex_unlock(&trace_types_lock);
5005}
5006
5007static int t_show(struct seq_file *m, void *v)
5008{
5009 struct tracer *t = v;
5010
5011 if (!t)
5012 return 0;
5013
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01005014 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005015 if (t->next)
5016 seq_putc(m, ' ');
5017 else
5018 seq_putc(m, '\n');
5019
5020 return 0;
5021}
5022
James Morris88e9d342009-09-22 16:43:43 -07005023static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005024 .start = t_start,
5025 .next = t_next,
5026 .stop = t_stop,
5027 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005028};
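/*
 * Illustrative output (assumed config, the actual list varies): reading
 * "available_tracers" walks trace_types through the seq_ops above, and
 * t_show() prints each name separated by a space, ending with a newline:
 *
 *	# cat /sys/kernel/tracing/available_tracers
 *	function_graph function nop
 */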
5029
5030static int show_traces_open(struct inode *inode, struct file *file)
5031{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005032 struct trace_array *tr = inode->i_private;
5033 struct seq_file *m;
5034 int ret;
5035
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005036 ret = tracing_check_open_get_tr(tr);
5037 if (ret)
5038 return ret;
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04005039
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005040 ret = seq_open(file, &show_traces_seq_ops);
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04005041 if (ret) {
5042 trace_array_put(tr);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005043 return ret;
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04005044 }
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05005045
5046 m = file->private_data;
5047 m->private = tr;
5048
5049 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005050}
5051
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04005052static int show_traces_release(struct inode *inode, struct file *file)
5053{
5054 struct trace_array *tr = inode->i_private;
5055
5056 trace_array_put(tr);
5057 return seq_release(inode, file);
5058}
5059
Steven Rostedt4acd4d02009-03-18 10:40:24 -04005060static ssize_t
5061tracing_write_stub(struct file *filp, const char __user *ubuf,
5062 size_t count, loff_t *ppos)
5063{
5064 return count;
5065}
5066
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005067loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08005068{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005069 int ret;
5070
Slava Pestov364829b2010-11-24 15:13:16 -08005071 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005072 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08005073 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005074 file->f_pos = ret = 0;
5075
5076 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08005077}
5078
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005079static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005080 .open = tracing_open,
5081 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04005082 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005083 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005084 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005085};
5086
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005087static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02005088 .open = show_traces_open,
5089 .read = seq_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005090 .llseek = seq_lseek,
Steven Rostedt (VMware)194c2c72019-10-11 18:19:17 -04005091 .release = show_traces_release,
Ingo Molnarc7078de2008-05-12 21:20:52 +02005092};
5093
5094static ssize_t
5095tracing_cpumask_read(struct file *filp, char __user *ubuf,
5096 size_t count, loff_t *ppos)
5097{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07005098 struct trace_array *tr = file_inode(filp)->i_private;
Changbin Du90e406f2017-11-30 11:39:43 +08005099 char *mask_str;
Ingo Molnar36dfe922008-05-12 21:20:52 +02005100 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02005101
Changbin Du90e406f2017-11-30 11:39:43 +08005102 len = snprintf(NULL, 0, "%*pb\n",
5103 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5104 mask_str = kmalloc(len, GFP_KERNEL);
5105 if (!mask_str)
5106 return -ENOMEM;
Ingo Molnar36dfe922008-05-12 21:20:52 +02005107
Changbin Du90e406f2017-11-30 11:39:43 +08005108 len = snprintf(mask_str, len, "%*pb\n",
Tejun Heo1a402432015-02-13 14:37:39 -08005109 cpumask_pr_args(tr->tracing_cpumask));
5110 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02005111 count = -EINVAL;
5112 goto out_err;
5113 }
Changbin Du90e406f2017-11-30 11:39:43 +08005114 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
Ingo Molnar36dfe922008-05-12 21:20:52 +02005115
5116out_err:
Changbin Du90e406f2017-11-30 11:39:43 +08005117 kfree(mask_str);
Ingo Molnarc7078de2008-05-12 21:20:52 +02005118
5119 return count;
5120}
5121
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09005122int tracing_set_cpumask(struct trace_array *tr,
5123 cpumask_var_t tracing_cpumask_new)
Ingo Molnarc7078de2008-05-12 21:20:52 +02005124{
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09005125 int cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10305126
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09005127 if (!tr)
5128 return -EINVAL;
Ingo Molnar36dfe922008-05-12 21:20:52 +02005129
Steven Rostedta5e25882008-12-02 15:34:05 -05005130 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05005131 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02005132 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02005133 /*
5134 * Increase/decrease the disabled counter if we are
5135 * about to flip a bit in the cpumask:
5136 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07005137 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10305138 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005139 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5140 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02005141 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07005142 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10305143 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005144 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5145 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02005146 }
5147 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05005148 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05005149 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02005150
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07005151 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09005152
5153 return 0;
5154}
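/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * tracing_set_cpumask() is what a write to the "tracing_cpumask" file ends
 * up calling, e.g. "echo 3 > /sys/kernel/tracing/tracing_cpumask" limits
 * recording to CPUs 0 and 1. A kernel-side caller would look roughly like
 * this:
 */
#if 0
static int example_trace_only_cpu0(struct trace_array *tr)
{
	cpumask_var_t new_mask;
	int ret;

	if (!zalloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(0, new_mask);
	/* disables recording on all other CPUs and copies the mask into tr */
	ret = tracing_set_cpumask(tr, new_mask);
	free_cpumask_var(new_mask);
	return ret;
}
#endif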
5155
5156static ssize_t
5157tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5158 size_t count, loff_t *ppos)
5159{
5160 struct trace_array *tr = file_inode(filp)->i_private;
5161 cpumask_var_t tracing_cpumask_new;
5162 int err;
5163
Tetsuo Handac5e3a412021-04-01 14:58:23 +09005164 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09005165 return -ENOMEM;
5166
5167 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5168 if (err)
5169 goto err_free;
5170
5171 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5172 if (err)
5173 goto err_free;
5174
Rusty Russell9e01c1b2009-01-01 10:12:22 +10305175 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02005176
Ingo Molnarc7078de2008-05-12 21:20:52 +02005177 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02005178
Masami Hiramatsu9d15dbb2020-01-11 01:07:16 +09005179err_free:
Li Zefan215368e2009-06-15 10:56:42 +08005180 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02005181
5182 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02005183}
5184
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005185static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07005186 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02005187 .read = tracing_cpumask_read,
5188 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07005189 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005190 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005191};
5192
Li Zefanfdb372e2009-12-08 11:15:59 +08005193static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005194{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05005195 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005196 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05005197 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05005198 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005199
Steven Rostedtd8e83d22009-02-26 23:55:58 -05005200 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005201 tracer_flags = tr->current_trace->flags->val;
5202 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05005203
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005204 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005205 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08005206 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005207 else
Li Zefanfdb372e2009-12-08 11:15:59 +08005208 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005209 }
5210
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005211 for (i = 0; trace_opts[i].name; i++) {
5212 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08005213 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005214 else
Li Zefanfdb372e2009-12-08 11:15:59 +08005215 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005216 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05005217 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005218
Li Zefanfdb372e2009-12-08 11:15:59 +08005219 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005220}
5221
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005222static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08005223 struct tracer_flags *tracer_flags,
5224 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005225{
Chunyu Hud39cdd22016-03-08 21:37:01 +08005226 struct tracer *trace = tracer_flags->trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08005227 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005228
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005229 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005230 if (ret)
5231 return ret;
5232
5233 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08005234 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005235 else
Zhaolei77708412009-08-07 18:53:21 +08005236 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005237 return 0;
5238}
5239
Li Zefan8d18eaa2009-12-08 11:17:06 +08005240/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005241static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08005242{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005243 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08005244 struct tracer_flags *tracer_flags = trace->flags;
5245 struct tracer_opt *opts = NULL;
5246 int i;
5247
5248 for (i = 0; tracer_flags->opts[i].name; i++) {
5249 opts = &tracer_flags->opts[i];
5250
5251 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005252 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08005253 }
5254
5255 return -EINVAL;
5256}
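/*
 * Illustrative sketch (hypothetical tracer, not part of the original file):
 * set_tracer_option() walks the current tracer's flags->opts[] by name and
 * flips the matching bit through the tracer's set_flag() callback. A tracer
 * typically describes its private options like this; the option name and
 * bit value here are made up:
 */
#if 0
#define EXAMPLE_OPT_VERBOSE	0x1

static struct tracer_opt example_opts[] = {
	/* TRACER_OPT(name, bit) fills in .name (stringified) and .bit */
	{ TRACER_OPT(verbose, EXAMPLE_OPT_VERBOSE) },
	{ } /* terminator; the name walk above stops at a NULL name */
};

static struct tracer_flags example_flags = {
	.val	= 0,		/* no options enabled by default */
	.opts	= example_opts,
};
#endif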
5257
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005258/* Some tracers require overwrite to stay enabled */
5259int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5260{
5261 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5262 return -1;
5263
5264 return 0;
5265}
5266
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005267int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04005268{
Paul Burton4030a6e2021-07-01 10:24:07 -07005269 int *map;
5270
Prateek Sood3a53acf2019-12-10 09:15:16 +00005271 if ((mask == TRACE_ITER_RECORD_TGID) ||
5272 (mask == TRACE_ITER_RECORD_CMD))
5273 lockdep_assert_held(&event_mutex);
5274
Steven Rostedtaf4617b2009-03-17 18:09:55 -04005275 /* do nothing if flag is already set */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005276 if (!!(tr->trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005277 return 0;
5278
5279 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005280 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05005281 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005282 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04005283
5284 if (enabled)
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005285 tr->trace_flags |= mask;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04005286 else
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005287 tr->trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08005288
5289 if (mask == TRACE_ITER_RECORD_CMD)
5290 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08005291
Joel Fernandesd914ba32017-06-26 19:01:55 -07005292 if (mask == TRACE_ITER_RECORD_TGID) {
Paul Burton4030a6e2021-07-01 10:24:07 -07005293 if (!tgid_map) {
5294 tgid_map_max = pid_max;
5295 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5296 GFP_KERNEL);
5297
5298 /*
5299 * Pairs with smp_load_acquire() in
5300 * trace_find_tgid_ptr() to ensure that if it observes
5301 * the tgid_map we just allocated then it also observes
5302 * the corresponding tgid_map_max value.
5303 */
5304 smp_store_release(&tgid_map, map);
5305 }
Joel Fernandesd914ba32017-06-26 19:01:55 -07005306 if (!tgid_map) {
5307 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5308 return -ENOMEM;
5309 }
5310
5311 trace_event_enable_tgid_record(enabled);
5312 }
5313
Steven Rostedtc37775d2016-04-13 16:59:18 -04005314 if (mask == TRACE_ITER_EVENT_FORK)
5315 trace_event_follow_fork(tr, enabled);
5316
Namhyung Kim1e104862017-04-17 11:44:28 +09005317 if (mask == TRACE_ITER_FUNC_FORK)
5318 ftrace_pid_follow_fork(tr, enabled);
5319
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04005320 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05005321 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04005322#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005323 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04005324#endif
5325 }
Steven Rostedt81698832012-10-11 10:15:05 -04005326
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04005327 if (mask == TRACE_ITER_PRINTK) {
Steven Rostedt81698832012-10-11 10:15:05 -04005328 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04005329 trace_printk_control(enabled);
5330 }
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005331
5332 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04005333}
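/*
 * Illustrative sketch (hypothetical callback, not part of the original file):
 * the flag_changed() hook checked above lets a tracer veto a flag change.
 * A latency-style tracer that depends on overwrite mode could forward the
 * check to trace_keep_overwrite() (defined earlier in this file), making
 * "echo 0 > options/overwrite" fail with -EINVAL while it is active:
 */
#if 0
static int example_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_OVERWRITE))
		return 0;

	return trace_keep_overwrite(tr->current_trace, mask, set);
}
#endif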
5334
Masami Hiramatsu9c5b9d32020-01-11 01:06:17 +09005335int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005336{
Li Zefan8d18eaa2009-12-08 11:17:06 +08005337 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005338 int neg = 0;
Yisheng Xie591a0332018-05-17 16:36:03 +08005339 int ret;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08005340 size_t orig_len = strlen(option);
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05005341 int len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005342
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005343 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005344
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05005345 len = str_has_prefix(cmp, "no");
5346 if (len)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005347 neg = 1;
Steven Rostedt (VMware)3d739c12018-12-21 23:10:26 -05005348
5349 cmp += len;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005350
Prateek Sood3a53acf2019-12-10 09:15:16 +00005351 mutex_lock(&event_mutex);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005352 mutex_lock(&trace_types_lock);
5353
Yisheng Xie591a0332018-05-17 16:36:03 +08005354 ret = match_string(trace_options, -1, cmp);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01005355 /* If no option could be set, test the specific tracer options */
Yisheng Xie591a0332018-05-17 16:36:03 +08005356 if (ret < 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005357 ret = set_tracer_option(tr, cmp, neg);
Yisheng Xie591a0332018-05-17 16:36:03 +08005358 else
5359 ret = set_tracer_flag(tr, 1 << ret, !neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005360
5361 mutex_unlock(&trace_types_lock);
Prateek Sood3a53acf2019-12-10 09:15:16 +00005362 mutex_unlock(&event_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005363
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08005364 /*
5365 * If the first trailing whitespace is replaced with '\0' by strstrip,
5366 * turn it back into a space.
5367 */
5368 if (orig_len > strlen(option))
5369 option[strlen(option)] = ' ';
5370
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005371 return ret;
5372}
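/*
 * Illustrative usage (hypothetical caller, not part of the original file):
 * trace_set_options() takes one option token, optionally prefixed with "no"
 * to clear it. str_has_prefix() strips the prefix, match_string() resolves
 * the core flag, and unknown names fall back to the current tracer's own
 * options via set_tracer_option(). The buffer must be writable, since
 * strstrip() may modify it:
 */
#if 0
static void example_toggle_print_parent(struct trace_array *tr)
{
	char enable[]  = "print-parent";	/* sets TRACE_ITER_PRINT_PARENT */
	char disable[] = "noprint-parent";	/* clears it again */

	trace_set_options(tr, enable);
	trace_set_options(tr, disable);
}
#endif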
5373
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08005374static void __init apply_trace_boot_options(void)
5375{
5376 char *buf = trace_boot_options_buf;
5377 char *option;
5378
5379 while (true) {
5380 option = strsep(&buf, ",");
5381
5382 if (!option)
5383 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08005384
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05005385 if (*option)
5386 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08005387
5388 /* Put back the comma to allow this to be called again */
5389 if (buf)
5390 *(buf - 1) = ',';
5391 }
5392}
5393
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005394static ssize_t
5395tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5396 size_t cnt, loff_t *ppos)
5397{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005398 struct seq_file *m = filp->private_data;
5399 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005400 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005401 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005402
5403 if (cnt >= sizeof(buf))
5404 return -EINVAL;
5405
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08005406 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005407 return -EFAULT;
5408
Steven Rostedta8dd2172013-01-09 20:54:17 -05005409 buf[cnt] = 0;
5410
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005411 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005412 if (ret < 0)
5413 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005414
Jiri Olsacf8517c2009-10-23 19:36:16 -04005415 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005416
5417 return cnt;
5418}
5419
Li Zefanfdb372e2009-12-08 11:15:59 +08005420static int tracing_trace_options_open(struct inode *inode, struct file *file)
5421{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005422 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005423 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005424
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005425 ret = tracing_check_open_get_tr(tr);
5426 if (ret)
5427 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005428
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005429 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5430 if (ret < 0)
5431 trace_array_put(tr);
5432
5433 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08005434}
5435
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005436static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08005437 .open = tracing_trace_options_open,
5438 .read = seq_read,
5439 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005440 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05005441 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005442};
5443
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005444static const char readme_msg[] =
5445 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005446 "# echo 0 > tracing_on : quick way to disable tracing\n"
5447 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5448 " Important files:\n"
5449 " trace\t\t\t- The static contents of the buffer\n"
5450 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5451 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5452 " current_tracer\t- function and latency tracers\n"
5453 " available_tracers\t- list of configured tracers for current_tracer\n"
Tom Zanussia8d65572019-03-31 18:48:25 -05005454 " error_log\t- error log for failed commands (that support it)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005455 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5456 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5457 " trace_clock\t\t-change the clock used to order events\n"
5458 " local: Per cpu clock but may not be synced across CPUs\n"
5459 " global: Synced across CPUs but slows tracing down.\n"
5460 " counter: Not a clock, but just an increment\n"
5461 " uptime: Jiffy counter from time of boot\n"
5462 " perf: Same clock that perf events use\n"
5463#ifdef CONFIG_X86_64
5464 " x86-tsc: TSC cycle counter\n"
5465#endif
Tom Zanussi2c1ea602018-01-15 20:51:41 -06005466 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5467 " delta: Delta difference against a buffer-wide timestamp\n"
5468 " absolute: Absolute (standalone) timestamp\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005469 "\n trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
Steven Rostedtfa32e852016-07-06 15:25:08 -04005470 "\n trace_marker_raw\t\t- Writes into this file write binary data into the kernel buffer\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005471 " tracing_cpumask\t- Limit which CPUs to trace\n"
5472 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5473 "\t\t\t Remove sub-buffer with rmdir\n"
5474 " trace_options\t\t- Set format or modify how tracing happens\n"
Srivatsa S. Bhat (VMware)b9416992019-01-28 17:55:53 -08005475 "\t\t\t Disable an option by prefixing 'no' to the\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005476 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005477 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005478#ifdef CONFIG_DYNAMIC_FTRACE
5479 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005480 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5481 "\t\t\t functions\n"
Masami Hiramatsu60f1d5e2016-10-05 20:58:15 +09005482 "\t accepts: func_full_name or glob-matching-pattern\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005483 "\t modules: Can select a group via module\n"
5484 "\t Format: :mod:<module-name>\n"
5485 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5486 "\t triggers: a command to perform when function is hit\n"
5487 "\t Format: <function>:<trigger>[:count]\n"
5488 "\t trigger: traceon, traceoff\n"
5489 "\t\t enable_event:<system>:<event>\n"
5490 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005491#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005492 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005493#endif
5494#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005495 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005496#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04005497 "\t\t dump\n"
5498 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005499 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5500 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5501 "\t The first one will disable tracing every time do_fault is hit\n"
5502 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5503 "\t The first time do trap is hit and it disables tracing, the\n"
5504 "\t counter will decrement to 2. If tracing is already disabled,\n"
5505 "\t the counter will not decrement. It only decrements when the\n"
5506 "\t trigger did work\n"
5507 "\t To remove trigger without count:\n"
5508 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5509 "\t To remove trigger with a count:\n"
5510 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005511 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005512 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5513 "\t modules: Can select a group via module command :mod:\n"
5514 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005515#endif /* CONFIG_DYNAMIC_FTRACE */
5516#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005517 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5518 "\t\t (function)\n"
Steven Rostedt (VMware)b3b1e6e2020-03-19 23:19:06 -04005519 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5520 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005521#endif
5522#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5523 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09005524 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005525 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5526#endif
5527#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005528 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5529 "\t\t\t snapshot buffer. Read the contents for more\n"
5530 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005531#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08005532#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005533 " stack_trace\t\t- Shows the max stack trace when active\n"
5534 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005535 "\t\t\t Write into this file to reset the max size (trigger a\n"
5536 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005537#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005538 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5539 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04005540#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08005541#endif /* CONFIG_STACK_TRACER */
Masami Hiramatsu5448d442018-11-05 18:02:08 +09005542#ifdef CONFIG_DYNAMIC_EVENTS
Masami Hiramatsuca89bc02019-06-20 00:07:49 +09005543 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
Masami Hiramatsu5448d442018-11-05 18:02:08 +09005544 "\t\t\t Write into this file to define/undefine new trace events.\n"
5545#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005546#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsuca89bc02019-06-20 00:07:49 +09005547 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005548 "\t\t\t Write into this file to define/undefine new trace events.\n"
5549#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005550#ifdef CONFIG_UPROBE_EVENTS
Masami Hiramatsu41af3cf2019-06-20 00:07:58 +09005551 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005552 "\t\t\t Write into this file to define/undefine new trace events.\n"
5553#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005554#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
Masami Hiramatsu86425622016-08-18 17:58:15 +09005555 "\t accepts: event-definitions (one definition per line)\n"
Masami Hiramatsuc3ca46e2017-05-23 15:05:50 +09005556 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5557 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
Masami Hiramatsu7bbab382018-11-05 18:03:33 +09005558#ifdef CONFIG_HIST_TRIGGERS
5559 "\t s:[synthetic/]<event> <field> [<field>]\n"
5560#endif
Tzvetomir Stoyanov (VMware)7491e2c2021-08-19 11:26:06 -04005561 "\t e[:[<group>/]<event>] <attached-group>.<attached-event> [<args>]\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005562 "\t -:[<group>/]<event>\n"
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005563#ifdef CONFIG_KPROBE_EVENTS
Masami Hiramatsu86425622016-08-18 17:58:15 +09005564 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
Masami Hiramatsu4725cd82020-09-10 17:55:35 +09005565 "\t place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005566#endif
Anton Blanchard6b0b7552017-02-16 17:00:50 +11005567#ifdef CONFIG_UPROBE_EVENTS
Masami Hiramatsu3dd3aae2020-09-10 17:55:46 +09005568 "\t place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005569#endif
5570 "\t args: <name>=fetcharg[:type]\n"
Tzvetomir Stoyanov (VMware)7491e2c2021-08-19 11:26:06 -04005571 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09005572#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
Masami Hiramatsue65f7ae2019-05-15 14:38:42 +09005573 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09005574#else
Masami Hiramatsue65f7ae2019-05-15 14:38:42 +09005575 "\t $stack<index>, $stack, $retval, $comm,\n"
Masami Hiramatsua1303af2018-04-25 21:21:26 +09005576#endif
Masami Hiramatsua42e3c42019-06-20 00:08:37 +09005577 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
Masami Hiramatsu60c2e0c2018-04-25 21:20:28 +09005578 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
Masami Hiramatsu88903c42019-05-15 14:38:30 +09005579 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
Masami Hiramatsu40b53b72018-04-25 21:21:55 +09005580 "\t <type>\\[<array-size>\\]\n"
Masami Hiramatsu7bbab382018-11-05 18:03:33 +09005581#ifdef CONFIG_HIST_TRIGGERS
5582 "\t field: <stype> <name>;\n"
5583 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5584 "\t [unsigned] char/int/long\n"
5585#endif
Tzvetomir Stoyanov (VMware)7491e2c2021-08-19 11:26:06 -04005586 "\t efield: For event probes ('e' types), the field is one of the fields\n"
5587 "\t of the <attached-group>/<attached-event>.\n"
Masami Hiramatsu86425622016-08-18 17:58:15 +09005588#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06005589 " events/\t\t- Directory containing all trace event subsystems:\n"
5590 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5591 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005592 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5593 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005594 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005595 " events/<system>/<event>/\t- Directory containing control files for\n"
5596 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005597 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5598 " filter\t\t- If set, only events passing filter are traced\n"
5599 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005600 "\t Format: <trigger>[:count][if <filter>]\n"
5601 "\t trigger: traceon, traceoff\n"
5602 "\t enable_event:<system>:<event>\n"
5603 "\t disable_event:<system>:<event>\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06005604#ifdef CONFIG_HIST_TRIGGERS
5605 "\t enable_hist:<system>:<event>\n"
5606 "\t disable_hist:<system>:<event>\n"
5607#endif
Tom Zanussi26f25562014-01-17 15:11:44 -06005608#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005609 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005610#endif
5611#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005612 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06005613#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005614#ifdef CONFIG_HIST_TRIGGERS
5615 "\t\t hist (see below)\n"
5616#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05005617 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5618 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5619 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5620 "\t events/block/block_unplug/trigger\n"
5621 "\t The first disables tracing every time block_unplug is hit.\n"
5622 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5623 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5624 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5625 "\t Like function triggers, the counter is only decremented if it\n"
5626 "\t enabled or disabled tracing.\n"
5627 "\t To remove a trigger without a count:\n"
5628 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5629 "\t To remove a trigger with a count:\n"
5630 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5631 "\t Filters can be ignored when removing a trigger.\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005632#ifdef CONFIG_HIST_TRIGGERS
5633 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
Tom Zanussi76a3b0c2016-03-03 12:54:44 -06005634 "\t Format: hist:keys=<field1[,field2,...]>\n"
Kalesh Singh6a6e5ef2021-10-29 11:33:29 -07005635 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
Tom Zanussif2606832016-03-03 12:54:43 -06005636 "\t [:values=<field1[,field2,...]>]\n"
Tom Zanussie62347d2016-03-03 12:54:45 -06005637 "\t [:sort=<field1[,field2,...]>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005638 "\t [:size=#entries]\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06005639 "\t [:pause][:continue][:clear]\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005640 "\t [:name=histname1]\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005641 "\t [:<handler>.<action>]\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005642 "\t [if <filter>]\n\n"
Steven Rostedt (VMware)1e3bac72021-07-21 11:00:53 -04005643 "\t Note, special fields can be used as well:\n"
5644 "\t common_timestamp - to record current timestamp\n"
5645 "\t common_cpu - to record the CPU the event happened on\n"
5646 "\n"
Kalesh Singh6a6e5ef2021-10-29 11:33:29 -07005647 "\t A hist trigger variable can be:\n"
5648 "\t - a reference to a field e.g. x=current_timestamp,\n"
5649 "\t - a reference to another variable e.g. y=$x,\n"
5650 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5651 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5652 "\n"
Colin Ian Kingf2b20c62021-11-08 20:15:13 +00005653 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
Kalesh Singh6a6e5ef2021-10-29 11:33:29 -07005654 "\t multiplication(*) and division(/) operators. An operand can be either a\n"
5655 "\t variable reference, field or numeric literal.\n"
5656 "\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005657 "\t When a matching event is hit, an entry is added to a hash\n"
Tom Zanussif2606832016-03-03 12:54:43 -06005658 "\t table using the key(s) and value(s) named, and the value of a\n"
5659 "\t sum called 'hitcount' is incremented. Keys and values\n"
5660 "\t correspond to fields in the event's format description. Keys\n"
Tom Zanussi69a02002016-03-03 12:54:52 -06005661 "\t can be any field, or the special string 'stacktrace'.\n"
5662 "\t Compound keys consisting of up to two fields can be specified\n"
5663 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5664 "\t fields. Sort keys consisting of up to two fields can be\n"
5665 "\t specified using the 'sort' keyword. The sort direction can\n"
5666 "\t be modified by appending '.descending' or '.ascending' to a\n"
5667 "\t sort field. The 'size' parameter can be used to specify more\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005668 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5669 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5670 "\t its histogram data will be shared with other triggers of the\n"
5671 "\t same name, and trigger hits will update this common data.\n\n"
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005672 "\t Reading the 'hist' file for the event will dump the hash\n"
Tom Zanussi52a7f162016-03-03 12:54:57 -06005673 "\t table in its entirety to stdout. If there are multiple hist\n"
5674 "\t triggers attached to an event, there will be a table for each\n"
Tom Zanussi5463bfd2016-03-03 12:54:59 -06005675 "\t trigger in the output. The table displayed for a named\n"
5676 "\t trigger will be the same as any other instance having the\n"
5677 "\t same name. The default format used to display a given field\n"
5678 "\t can be modified by appending any of the following modifiers\n"
5679 "\t to the field name, as applicable:\n\n"
Tom Zanussic6afad42016-03-03 12:54:49 -06005680 "\t .hex display a number as a hex value\n"
5681 "\t .sym display an address as a symbol\n"
Tom Zanussi6b4827a2016-03-03 12:54:50 -06005682 "\t .sym-offset display an address as a symbol and offset\n"
Tom Zanussi31696192016-03-03 12:54:51 -06005683 "\t .execname display a common_pid as a program name\n"
Tom Zanussi860f9f62018-01-15 20:51:48 -06005684 "\t .syscall display a syscall id as a syscall name\n"
5685 "\t .log2 display log2 value rather than raw number\n"
Steven Rostedt (VMware)37036432021-07-07 17:36:25 -04005686 "\t .buckets=size display values in groups of size rather than raw number\n"
Tom Zanussi860f9f62018-01-15 20:51:48 -06005687 "\t .usecs display a common_timestamp in microseconds\n\n"
Tom Zanussi83e99912016-03-03 12:54:46 -06005688 "\t The 'pause' parameter can be used to pause an existing hist\n"
5689 "\t trigger or to start a hist trigger but not log any events\n"
5690 "\t until told to do so. 'continue' can be used to start or\n"
5691 "\t restart a paused hist trigger.\n\n"
Tom Zanussie86ae9b2016-03-03 12:54:47 -06005692 "\t The 'clear' parameter will clear the contents of a running\n"
5693 "\t hist trigger and leave its current paused/active state\n"
5694 "\t unchanged.\n\n"
Tom Zanussid0bad492016-03-03 12:54:55 -06005695 "\t The enable_hist and disable_hist triggers can be used to\n"
5696 "\t have one event conditionally start and stop another event's\n"
Colin Ian King9e5a36a2019-02-17 22:32:22 +00005697 "\t already-attached hist trigger. The syntax is analogous to\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005698 "\t the enable_event and disable_event triggers.\n\n"
5699 "\t Hist trigger handlers and actions are executed whenever a\n"
5700 "\t a histogram entry is added or updated. They take the form:\n\n"
5701 "\t <handler>.<action>\n\n"
5702 "\t The available handlers are:\n\n"
5703 "\t onmatch(matching.event) - invoke on addition or update\n"
Tom Zanussidff81f52019-02-13 17:42:48 -06005704 "\t onmax(var) - invoke if var exceeds current max\n"
5705 "\t onchange(var) - invoke action if var changes\n\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005706 "\t The available actions are:\n\n"
Tom Zanussie91eefd72019-02-13 17:42:50 -06005707 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
Tom Zanussic3e49502019-02-13 17:42:43 -06005708 "\t save(field,...) - save current event fields\n"
Tom Zanussia3785b72019-02-13 17:42:46 -06005709#ifdef CONFIG_TRACER_SNAPSHOT
Tom Zanussi1bc36bd2020-10-04 17:14:07 -05005710 "\t snapshot() - snapshot the trace buffer\n\n"
5711#endif
5712#ifdef CONFIG_SYNTH_EVENTS
5713 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5714 "\t Write into this file to define/undefine new synthetic events.\n"
5715 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
Tom Zanussia3785b72019-02-13 17:42:46 -06005716#endif
Tom Zanussi7ef224d2016-03-03 12:54:42 -06005717#endif
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005718;
5719
5720static ssize_t
5721tracing_readme_read(struct file *filp, char __user *ubuf,
5722 size_t cnt, loff_t *ppos)
5723{
5724 return simple_read_from_buffer(ubuf, cnt, ppos,
5725 readme_msg, strlen(readme_msg));
5726}
5727
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005728static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02005729 .open = tracing_open_generic,
5730 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005731 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005732};
5733
Michael Sartain99c621d2017-07-05 22:07:15 -06005734static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5735{
Paul Burtonb81b3e92021-06-29 17:34:05 -07005736 int pid = ++(*pos);
Michael Sartain99c621d2017-07-05 22:07:15 -06005737
Paul Burton4030a6e2021-07-01 10:24:07 -07005738 return trace_find_tgid_ptr(pid);
Michael Sartain99c621d2017-07-05 22:07:15 -06005739}
5740
5741static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5742{
Paul Burton4030a6e2021-07-01 10:24:07 -07005743 int pid = *pos;
Michael Sartain99c621d2017-07-05 22:07:15 -06005744
Paul Burton4030a6e2021-07-01 10:24:07 -07005745 return trace_find_tgid_ptr(pid);
Michael Sartain99c621d2017-07-05 22:07:15 -06005746}
5747
5748static void saved_tgids_stop(struct seq_file *m, void *v)
5749{
5750}
5751
5752static int saved_tgids_show(struct seq_file *m, void *v)
5753{
Paul Burtonb81b3e92021-06-29 17:34:05 -07005754 int *entry = (int *)v;
5755 int pid = entry - tgid_map;
5756 int tgid = *entry;
Michael Sartain99c621d2017-07-05 22:07:15 -06005757
Paul Burtonb81b3e92021-06-29 17:34:05 -07005758 if (tgid == 0)
5759 return SEQ_SKIP;
5760
5761 seq_printf(m, "%d %d\n", pid, tgid);
Michael Sartain99c621d2017-07-05 22:07:15 -06005762 return 0;
5763}
5764
5765static const struct seq_operations tracing_saved_tgids_seq_ops = {
5766 .start = saved_tgids_start,
5767 .stop = saved_tgids_stop,
5768 .next = saved_tgids_next,
5769 .show = saved_tgids_show,
5770};
5771
5772static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5773{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005774 int ret;
5775
5776 ret = tracing_check_open_get_tr(NULL);
5777 if (ret)
5778 return ret;
Michael Sartain99c621d2017-07-05 22:07:15 -06005779
5780 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5781}
5782
5783
5784static const struct file_operations tracing_saved_tgids_fops = {
5785 .open = tracing_saved_tgids_open,
5786 .read = seq_read,
5787 .llseek = seq_lseek,
5788 .release = seq_release,
5789};
5790
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005791static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04005792{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005793 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005794
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005795 if (*pos || m->count)
5796 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005797
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005798 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005799
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005800 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5801 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005802 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04005803 continue;
5804
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005805 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005806 }
5807
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005808 return NULL;
5809}
Avadh Patel69abe6a2009-04-10 16:04:48 -04005810
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005811static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5812{
5813 void *v;
5814 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04005815
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005816 preempt_disable();
5817 arch_spin_lock(&trace_cmdline_lock);
5818
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005819 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005820 while (l <= *pos) {
5821 v = saved_cmdlines_next(m, v, &l);
5822 if (!v)
5823 return NULL;
5824 }
5825
5826 return v;
5827}
5828
5829static void saved_cmdlines_stop(struct seq_file *m, void *v)
5830{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005831 arch_spin_unlock(&trace_cmdline_lock);
5832 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005833}
5834
5835static int saved_cmdlines_show(struct seq_file *m, void *v)
5836{
5837 char buf[TASK_COMM_LEN];
5838 unsigned int *pid = v;
5839
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04005840 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005841 seq_printf(m, "%d %s\n", *pid, buf);
5842 return 0;
5843}
5844
5845static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5846 .start = saved_cmdlines_start,
5847 .next = saved_cmdlines_next,
5848 .stop = saved_cmdlines_stop,
5849 .show = saved_cmdlines_show,
5850};
5851
5852static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5853{
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04005854 int ret;
5855
5856 ret = tracing_check_open_get_tr(NULL);
5857 if (ret)
5858 return ret;
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005859
5860 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04005861}
5862
5863static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09005864 .open = tracing_saved_cmdlines_open,
5865 .read = seq_read,
5866 .llseek = seq_lseek,
5867 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04005868};
5869
5870static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005871tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5872 size_t cnt, loff_t *ppos)
5873{
5874 char buf[64];
5875 int r;
5876
5877 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09005878 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005879 arch_spin_unlock(&trace_cmdline_lock);
5880
5881 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5882}
5883
5884static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5885{
5886 kfree(s->saved_cmdlines);
5887 kfree(s->map_cmdline_to_pid);
5888 kfree(s);
5889}
5890
5891static int tracing_resize_saved_cmdlines(unsigned int val)
5892{
5893 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5894
Namhyung Kima6af8fb2014-06-10 16:11:35 +09005895 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09005896 if (!s)
5897 return -ENOMEM;
5898
5899 if (allocate_cmdlines_buffer(val, s) < 0) {
5900 kfree(s);
5901 return -ENOMEM;
5902 }
5903
5904 arch_spin_lock(&trace_cmdline_lock);
5905 savedcmd_temp = savedcmd;
5906 savedcmd = s;
5907 arch_spin_unlock(&trace_cmdline_lock);
5908 free_saved_cmdlines_buffer(savedcmd_temp);
5909
5910 return 0;
5911}
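/*
 * Illustrative usage (value is only an example): the resize above is driven
 * by writes to "saved_cmdlines_size", which the write handler below bounds
 * to 1..PID_MAX_DEFAULT, e.g.
 *
 *	# echo 4096 > /sys/kernel/tracing/saved_cmdlines_size
 *
 * A new saved_cmdlines_buffer is swapped in under trace_cmdline_lock and the
 * old one is freed.
 */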
5912
5913static ssize_t
5914tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5915 size_t cnt, loff_t *ppos)
5916{
5917 unsigned long val;
5918 int ret;
5919
5920 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5921 if (ret)
5922 return ret;
5923
5924 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
5925 if (!val || val > PID_MAX_DEFAULT)
5926 return -EINVAL;
5927
5928 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5929 if (ret < 0)
5930 return ret;
5931
5932 *ppos += cnt;
5933
5934 return cnt;
5935}
5936
5937static const struct file_operations tracing_saved_cmdlines_size_fops = {
5938 .open = tracing_open_generic,
5939 .read = tracing_saved_cmdlines_size_read,
5940 .write = tracing_saved_cmdlines_size_write,
5941};
5942
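/*
 * When CONFIG_TRACE_EVAL_MAP_FILE is enabled, the eval (enum) maps are
 * exposed through a seq_file interface.  The maps are stored as chunks of
 * [ head | map 0 .. map N-1 | tail ] items, where the head records the
 * owning module and chunk length and the tail links to the next chunk;
 * the iterators below skip over the head/tail bookkeeping entries.
 */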
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static union trace_eval_map_item *
update_eval_map(union trace_eval_map_item *ptr)
{
	if (!ptr->map.eval_string) {
		if (ptr->tail.next) {
			ptr = ptr->tail.next;
			/* Set ptr to the next real item (skip head) */
			ptr++;
		} else
			return NULL;
	}
	return ptr;
}

static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
{
	union trace_eval_map_item *ptr = v;

	/*
	 * Paranoid! If ptr points to end, we don't want to increment past it.
	 * This really should never happen.
	 */
	(*pos)++;
	ptr = update_eval_map(ptr);
	if (WARN_ON_ONCE(!ptr))
		return NULL;

	ptr++;
	ptr = update_eval_map(ptr);

	return ptr;
}

static void *eval_map_start(struct seq_file *m, loff_t *pos)
{
	union trace_eval_map_item *v;
	loff_t l = 0;

	mutex_lock(&trace_eval_mutex);

	v = trace_eval_maps;
	if (v)
		v++;

	while (v && l < *pos) {
		v = eval_map_next(m, v, &l);
	}

	return v;
}

static void eval_map_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&trace_eval_mutex);
}

static int eval_map_show(struct seq_file *m, void *v)
{
	union trace_eval_map_item *ptr = v;

	seq_printf(m, "%s %ld (%s)\n",
		   ptr->map.eval_string, ptr->map.eval_value,
		   ptr->map.system);

	return 0;
}

static const struct seq_operations tracing_eval_map_seq_ops = {
	.start		= eval_map_start,
	.next		= eval_map_next,
	.stop		= eval_map_stop,
	.show		= eval_map_show,
};

static int tracing_eval_map_open(struct inode *inode, struct file *filp)
{
	int ret;

	ret = tracing_check_open_get_tr(NULL);
	if (ret)
		return ret;

	return seq_open(filp, &tracing_eval_map_seq_ops);
}

static const struct file_operations tracing_eval_map_fops = {
	.open		= tracing_eval_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static inline union trace_eval_map_item *
trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
{
	/* Return tail of array given the head */
	return ptr + ptr->head.length + 1;
}

static void
trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
			   int len)
{
	struct trace_eval_map **stop;
	struct trace_eval_map **map;
	union trace_eval_map_item *map_array;
	union trace_eval_map_item *ptr;

	stop = start + len;

	/*
	 * The trace_eval_maps contains the map plus a head and tail item,
	 * where the head holds the module and length of array, and the
	 * tail holds a pointer to the next list.
	 */
	map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
	if (!map_array) {
		pr_warn("Unable to allocate trace eval mapping\n");
		return;
	}

	mutex_lock(&trace_eval_mutex);

	if (!trace_eval_maps)
		trace_eval_maps = map_array;
	else {
		ptr = trace_eval_maps;
		for (;;) {
			ptr = trace_eval_jmp_to_tail(ptr);
			if (!ptr->tail.next)
				break;
			ptr = ptr->tail.next;
		}
		ptr->tail.next = map_array;
	}
	map_array->head.mod = mod;
	map_array->head.length = len;
	map_array++;

	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
		map_array->map = **map;
		map_array++;
	}
	memset(map_array, 0, sizeof(*map_array));

	mutex_unlock(&trace_eval_mutex);
}

static void trace_create_eval_file(struct dentry *d_tracer)
{
	trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
			  NULL, &tracing_eval_map_fops);
}

#else /* CONFIG_TRACE_EVAL_MAP_FILE */
static inline void trace_create_eval_file(struct dentry *d_tracer) { }
static inline void trace_insert_eval_map_file(struct module *mod,
			      struct trace_eval_map **start, int len) { }
#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */

static void trace_insert_eval_map(struct module *mod,
				  struct trace_eval_map **start, int len)
{
	struct trace_eval_map **map;

	if (len <= 0)
		return;

	map = start;

	trace_event_eval_update(map, len);

	trace_insert_eval_map_file(mod, start, len);
}

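/*
 * Read handler for the current_tracer file: reports the name of the
 * tracer currently attached to this trace instance.
 */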
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's buffer to the size of @size_buf's entries */
static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */

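/*
 * Resize the ring buffer of @tr for one CPU or for all CPUs.  With
 * CONFIG_TRACER_MAX_TRACE the snapshot ("max") buffer is kept the same
 * size as the main buffer; if resizing the max buffer fails, the main
 * buffer is put back to its previous size.
 */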
static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->array_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->array_buffer,
						     &tr->array_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->array_buffer, size);
	else
		per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;

	return ret;
}

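/*
 * Public entry point for resizing: validates that the requested CPU is
 * part of tracing_buffer_mask and performs the resize under
 * trace_types_lock.
 */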
ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
				  unsigned long size, int cpu_id)
{
	int ret;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}


/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save on memory when the tracing is never used on a system with it
 * configured in. The ring buffers are set to a minimum size. But once
 * a user starts to use the tracing facility, then they need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct trace_option_dentry;

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	create_trace_option_files(tr, t);
}

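/*
 * Switch the current tracer of @tr to the tracer named @buf: expand the
 * ring buffer if needed, validate the request (conditional snapshot in
 * use, boot-time restrictions, top-level-only tracers, open trace_pipe
 * readers), tear down the old tracer, and initialize the new one.
 */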
int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

#ifdef CONFIG_TRACER_SNAPSHOT
	if (t->use_max_tr) {
		arch_spin_lock(&tr->max_lock);
		if (tr->cond_snapshot)
			ret = -EBUSY;
		arch_spin_unlock(&tr->max_lock);
		if (ret)
			goto out;
	}
#endif
	/* Some tracers won't work on kernel command line */
	if (system_state < SYSTEM_RUNNING && t->noboot) {
		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
			t->name);
		goto out;
	}

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->trace_ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_rcu */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called from interrupts disabled
		 * so a synchronize_rcu() is sufficient.
		 */
		synchronize_rcu();
		free_snapshot(tr);
	}
#endif

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = tracing_alloc_snapshot_instance(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}

static ssize_t
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}

static ssize_t
tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
}

static ssize_t
tracing_thresh_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	int ret;

	mutex_lock(&trace_types_lock);
	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
	if (ret < 0)
		goto out;

	if (tr->current_trace->update_thresh) {
		ret = tr->current_trace->update_thresh(tr);
		if (ret < 0)
			goto out;
	}

	ret = cnt;
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}

#endif

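/*
 * Open handler for trace_pipe: allocates a trace_iterator for this
 * reader, applies the per-instance latency and clock flags, and bumps
 * tr->trace_ref so the tracer cannot be switched while the pipe is open.
 */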
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret;

	ret = tracing_check_open_get_tr(tr);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	trace_seq_init(&iter->seq);
	iter->trace = tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->array_buffer = &tr->array_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);

	tr->trace_ref++;
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	tr->trace_ref--;

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}

static __poll_t
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	struct trace_array *tr = iter->tr;

	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return EPOLLIN | EPOLLRDNORM;

	if (tr->trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return EPOLLIN | EPOLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static __poll_t
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}

/* Must be called with iter->mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, 0);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;
	}

	return 1;
}

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	ssize_t sret;

	/*
	 * Avoid more than one consumer on a single file descriptor
	 * This is just a matter of traces coherency, the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		goto out;

	trace_seq_init(&iter->seq);

	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	trace_iterator_reset(iter);
	cpumask_clear(iter->started);
	trace_seq_init(&iter->seq);

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.seq.len = save_len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

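/*
 * Fill iter->seq with up to one page worth of formatted trace output,
 * consuming entries as they are rendered.  Returns how much of @rem is
 * still left to fill for the current splice request.
 */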
static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

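/*
 * splice() handler for trace_pipe: renders trace entries page by page
 * via tracing_fill_pipe_page() and hands the filled pages to the pipe
 * with splice_to_pipe().
 */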
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.ops		= &default_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	if (i)
		ret = splice_to_pipe(pipe, &spd);
	else
		ret = 0;
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}

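/*
 * Read handler backing the per-instance buffer size file: prints the
 * per-CPU buffer size in KB, appends "(expanded: ...)" while the buffer
 * is still at its boot-time minimum, and prints "X" when the per-CPU
 * sizes differ.
 */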
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
				size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written, this function
	 * is just to make sure that there is no error when "echo" is used
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing ? */
	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}

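/*
 * Write handler for trace_marker: injects the user-supplied string into
 * the ring buffer as a TRACE_PRINT event.  A rough usage sketch (assuming
 * the usual tracefs mount point):
 *
 *	echo "hello from user space" > /sys/kernel/tracing/trace_marker
 */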
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007056static ssize_t
7057tracing_mark_write(struct file *filp, const char __user *ubuf,
7058 size_t cnt, loff_t *fpos)
7059{
Alexander Z Lam2d716192013-07-01 15:31:24 -07007060 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04007061 struct ring_buffer_event *event;
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04007062 enum event_trigger_type tt = ETT_NONE;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05007063 struct trace_buffer *buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04007064 struct print_entry *entry;
Steven Rostedtd696b582011-09-22 11:50:27 -04007065 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04007066 int size;
7067 int len;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007068
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007069/* Used in tracing_mark_raw_write() as well */
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01007070#define FAULTED_STR "<faulted>"
7071#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007072
Steven Rostedtc76f0692008-11-07 22:36:02 -05007073 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007074 return -EINVAL;
7075
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04007076 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07007077 return -EINVAL;
7078
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007079 if (cnt > TRACE_BUF_SIZE)
7080 cnt = TRACE_BUF_SIZE;
7081
Steven Rostedtd696b582011-09-22 11:50:27 -04007082 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007083
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007084 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7085
7086 /* If less than "<faulted>", then make sure we can still add that */
7087 if (cnt < FAULTED_SIZE)
7088 size += FAULTED_SIZE - cnt;
7089
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007090 buffer = tr->array_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05007091 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01007092 tracing_gen_ctx());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007093 if (unlikely(!event))
Steven Rostedtd696b582011-09-22 11:50:27 -04007094 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007095 return -EBADF;
Steven Rostedtd696b582011-09-22 11:50:27 -04007096
7097 entry = ring_buffer_event_data(event);
7098 entry->ip = _THIS_IP_;
7099
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007100 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7101 if (len) {
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01007102 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007103 cnt = FAULTED_SIZE;
7104 written = -EFAULT;
Steven Rostedtd696b582011-09-22 11:50:27 -04007105 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007106 written = cnt;
Steven Rostedtd696b582011-09-22 11:50:27 -04007107
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04007108 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7109 /* do not add \n before testing triggers, but add \0 */
7110 entry->buf[cnt] = '\0';
Steven Rostedt (VMware)b47e3302021-03-16 12:41:03 -04007111 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04007112 }
7113
Steven Rostedtd696b582011-09-22 11:50:27 -04007114 if (entry->buf[cnt - 1] != '\n') {
7115 entry->buf[cnt] = '\n';
7116 entry->buf[cnt + 1] = '\0';
7117 } else
7118 entry->buf[cnt] = '\0';
7119
Tingwei Zhang458999c2020-10-05 10:13:15 +03007120 if (static_branch_unlikely(&trace_marker_exports_enabled))
7121 ftrace_exports(event, TRACE_EXPORT_MARKER);
Steven Rostedt7ffbd482012-10-11 12:14:25 -04007122 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04007123
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04007124 if (tt)
7125 event_triggers_post_call(tr->trace_marker_file, tt);
7126
Steven Rostedtfa32e852016-07-06 15:25:08 -04007127 return written;
7128}
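/*
 * Illustrative user-space usage (a sketch, not part of this file): data
 * written to the trace_marker file is recorded by the handler above as a
 * TRACE_PRINT event. Assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *
 *	if (fd >= 0)
 *		write(fd, "hello from user space\n", 22);
 */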
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007129
Steven Rostedtfa32e852016-07-06 15:25:08 -04007130/* Limit it for now to 3K (including tag) */
7131#define RAW_DATA_MAX_SIZE (1024*3)
7132
7133static ssize_t
7134tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7135 size_t cnt, loff_t *fpos)
7136{
7137 struct trace_array *tr = filp->private_data;
7138 struct ring_buffer_event *event;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05007139 struct trace_buffer *buffer;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007140 struct raw_data_entry *entry;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007141 ssize_t written;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007142 int size;
7143 int len;
7144
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007145#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7146
Steven Rostedtfa32e852016-07-06 15:25:08 -04007147 if (tracing_disabled)
7148 return -EINVAL;
7149
7150 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7151 return -EINVAL;
7152
7153 /* The marker must at least have a tag id */
7154 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7155 return -EINVAL;
7156
7157 if (cnt > TRACE_BUF_SIZE)
7158 cnt = TRACE_BUF_SIZE;
7159
7160 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7161
Steven Rostedtfa32e852016-07-06 15:25:08 -04007162 size = sizeof(*entry) + cnt;
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007163 if (cnt < FAULT_SIZE_ID)
7164 size += FAULT_SIZE_ID - cnt;
7165
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007166 buffer = tr->array_buffer.buffer;
Steven Rostedt (Red Hat)3e9a8aa2016-11-23 11:29:58 -05007167 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
Sebastian Andrzej Siewior36590c502021-01-25 20:45:08 +01007168 tracing_gen_ctx());
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007169 if (!event)
Steven Rostedtfa32e852016-07-06 15:25:08 -04007170 /* Ring buffer disabled, return as if not open for write */
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007171 return -EBADF;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007172
7173 entry = ring_buffer_event_data(event);
7174
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007175 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7176 if (len) {
7177 entry->id = -1;
Rasmus Villemoes0f5e5a32019-03-20 09:17:57 +01007178 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007179 written = -EFAULT;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007180 } else
Steven Rostedt (Red Hat)656c7f02016-12-08 12:40:18 -05007181 written = cnt;
Steven Rostedtfa32e852016-07-06 15:25:08 -04007182
7183 __buffer_unlock_commit(buffer, event);
7184
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02007185 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007186}
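/*
 * Illustrative user-space usage of trace_marker_raw (a sketch, not part of
 * this file): the handler above expects the payload to begin with the
 * 4-byte tag id copied into entry->id, followed by the raw data:
 *
 *	struct { unsigned int id; char data[8]; } rec = { 42, "rawdata" };
 *
 *	write(fd, &rec, sizeof(rec));
 *
 * where fd refers to an open trace_marker_raw file descriptor.
 */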
7187
Li Zefan13f16d22009-12-08 11:16:11 +08007188static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08007189{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007190 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08007191 int i;
7192
7193 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08007194 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08007195 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007196 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7197 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08007198 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08007199
Li Zefan13f16d22009-12-08 11:16:11 +08007200 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08007201}
7202
Tom Zanussid71bd342018-01-15 20:52:07 -06007203int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08007204{
Zhaolei5079f322009-08-25 16:12:56 +08007205 int i;
7206
Zhaolei5079f322009-08-25 16:12:56 +08007207 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7208 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7209 break;
7210 }
7211 if (i == ARRAY_SIZE(trace_clocks))
7212 return -EINVAL;
7213
Zhaolei5079f322009-08-25 16:12:56 +08007214 mutex_lock(&trace_types_lock);
7215
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007216 tr->clock_id = i;
7217
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007218 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08007219
David Sharp60303ed2012-10-11 16:27:52 -07007220 /*
7221 * New clock may not be consistent with the previous clock.
7222 * Reset the buffer so that it doesn't have incomparable timestamps.
7223 */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007224 tracing_reset_online_cpus(&tr->array_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007225
7226#ifdef CONFIG_TRACER_MAX_TRACE
Baohong Liu170b3b12017-09-05 16:57:19 -05007227 if (tr->max_buffer.buffer)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007228 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07007229 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05007230#endif
David Sharp60303ed2012-10-11 16:27:52 -07007231
Zhaolei5079f322009-08-25 16:12:56 +08007232 mutex_unlock(&trace_types_lock);
7233
Steven Rostedte1e232c2014-02-10 23:38:46 -05007234 return 0;
7235}
7236
7237static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7238 size_t cnt, loff_t *fpos)
7239{
7240 struct seq_file *m = filp->private_data;
7241 struct trace_array *tr = m->private;
7242 char buf[64];
7243 const char *clockstr;
7244 int ret;
7245
7246 if (cnt >= sizeof(buf))
7247 return -EINVAL;
7248
Wang Xiaoqiang4afe6492016-04-18 15:23:29 +08007249 if (copy_from_user(buf, ubuf, cnt))
Steven Rostedte1e232c2014-02-10 23:38:46 -05007250 return -EFAULT;
7251
7252 buf[cnt] = 0;
7253
7254 clockstr = strstrip(buf);
7255
7256 ret = tracing_set_clock(tr, clockstr);
7257 if (ret)
7258 return ret;
7259
Zhaolei5079f322009-08-25 16:12:56 +08007260 *fpos += cnt;
7261
7262 return cnt;
7263}
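/*
 * Usage sketch (assuming the default tracefs mount point): the accepted
 * clock names are the trace_clocks[] entries shown by reading this file,
 * e.g.
 *
 *	echo global > /sys/kernel/tracing/trace_clock
 *
 * switches the current trace array to the "global" clock.
 */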
7264
Li Zefan13f16d22009-12-08 11:16:11 +08007265static int tracing_clock_open(struct inode *inode, struct file *file)
7266{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007267 struct trace_array *tr = inode->i_private;
7268 int ret;
7269
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007270 ret = tracing_check_open_get_tr(tr);
7271 if (ret)
7272 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007273
7274 ret = single_open(file, tracing_clock_show, inode->i_private);
7275 if (ret < 0)
7276 trace_array_put(tr);
7277
7278 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08007279}
7280
Tom Zanussi2c1ea602018-01-15 20:51:41 -06007281static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7282{
7283 struct trace_array *tr = m->private;
7284
7285 mutex_lock(&trace_types_lock);
7286
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007287 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
Tom Zanussi2c1ea602018-01-15 20:51:41 -06007288 seq_puts(m, "delta [absolute]\n");
7289 else
7290 seq_puts(m, "[delta] absolute\n");
7291
7292 mutex_unlock(&trace_types_lock);
7293
7294 return 0;
7295}
7296
7297static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7298{
7299 struct trace_array *tr = inode->i_private;
7300 int ret;
7301
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007302 ret = tracing_check_open_get_tr(tr);
7303 if (ret)
7304 return ret;
Tom Zanussi2c1ea602018-01-15 20:51:41 -06007305
7306 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7307 if (ret < 0)
7308 trace_array_put(tr);
7309
7310 return ret;
7311}
7312
Steven Rostedt (VMware)d8279bfc2021-03-16 12:41:07 -04007313u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7314{
7315 if (rbe == this_cpu_read(trace_buffered_event))
Yordan Karadzhov (VMware)f3ef7202021-03-29 16:03:31 +03007316 return ring_buffer_time_stamp(buffer);
Steven Rostedt (VMware)d8279bfc2021-03-16 12:41:07 -04007317
7318 return ring_buffer_event_time_stamp(buffer, rbe);
7319}
7320
Steven Rostedt (VMware)b94bc802021-03-16 12:41:05 -04007321/*
7322 * Set or disable using the per CPU trace_buffered_event when possible.
7323 */
7324int tracing_set_filter_buffering(struct trace_array *tr, bool set)
Tom Zanussi00b41452018-01-15 20:51:39 -06007325{
7326 int ret = 0;
7327
7328 mutex_lock(&trace_types_lock);
7329
Steven Rostedt (VMware)b94bc802021-03-16 12:41:05 -04007330 if (set && tr->no_filter_buffering_ref++)
Tom Zanussi00b41452018-01-15 20:51:39 -06007331 goto out;
7332
Steven Rostedt (VMware)b94bc802021-03-16 12:41:05 -04007333 if (!set) {
7334 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
Tom Zanussi00b41452018-01-15 20:51:39 -06007335 ret = -EINVAL;
7336 goto out;
7337 }
7338
Steven Rostedt (VMware)b94bc802021-03-16 12:41:05 -04007339 --tr->no_filter_buffering_ref;
Tom Zanussi00b41452018-01-15 20:51:39 -06007340 }
Tom Zanussi00b41452018-01-15 20:51:39 -06007341 out:
7342 mutex_unlock(&trace_types_lock);
7343
7344 return ret;
7345}
7346
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007347struct ftrace_buffer_info {
7348 struct trace_iterator iter;
7349 void *spare;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04007350 unsigned int spare_cpu;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007351 unsigned int read;
7352};
7353
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007354#ifdef CONFIG_TRACER_SNAPSHOT
7355static int tracing_snapshot_open(struct inode *inode, struct file *file)
7356{
Oleg Nesterov6484c712013-07-23 17:26:10 +02007357 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007358 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007359 struct seq_file *m;
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007360 int ret;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007361
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007362 ret = tracing_check_open_get_tr(tr);
7363 if (ret)
7364 return ret;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007365
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007366 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02007367 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007368 if (IS_ERR(iter))
7369 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007370 } else {
7371 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07007372 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007373 m = kzalloc(sizeof(*m), GFP_KERNEL);
7374 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07007375 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007376 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7377 if (!iter) {
7378 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07007379 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007380 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07007381 ret = 0;
7382
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007383 iter->tr = tr;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007384 iter->array_buffer = &tr->max_buffer;
Oleg Nesterov6484c712013-07-23 17:26:10 +02007385 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007386 m->private = iter;
7387 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007388 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07007389out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007390 if (ret < 0)
7391 trace_array_put(tr);
7392
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007393 return ret;
7394}
7395
7396static ssize_t
7397tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7398 loff_t *ppos)
7399{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007400 struct seq_file *m = filp->private_data;
7401 struct trace_iterator *iter = m->private;
7402 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007403 unsigned long val;
7404 int ret;
7405
7406 ret = tracing_update_buffers();
7407 if (ret < 0)
7408 return ret;
7409
7410 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7411 if (ret)
7412 return ret;
7413
7414 mutex_lock(&trace_types_lock);
7415
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007416 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007417 ret = -EBUSY;
7418 goto out;
7419 }
7420
Tom Zanussia35873a2019-02-13 17:42:45 -06007421 arch_spin_lock(&tr->max_lock);
7422 if (tr->cond_snapshot)
7423 ret = -EBUSY;
7424 arch_spin_unlock(&tr->max_lock);
7425 if (ret)
7426 goto out;
7427
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007428 switch (val) {
7429 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007430 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7431 ret = -EINVAL;
7432 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007433 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04007434 if (tr->allocated_snapshot)
7435 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007436 break;
7437 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007438/* Only allow per-cpu swap if the ring buffer supports it */
7439#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7440 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7441 ret = -EINVAL;
7442 break;
7443 }
7444#endif
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09007445 if (tr->allocated_snapshot)
7446 ret = resize_buffer_duplicate_size(&tr->max_buffer,
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007447 &tr->array_buffer, iter->cpu_file);
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09007448 else
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04007449 ret = tracing_alloc_snapshot_instance(tr);
Eiichi Tsukata46cc0b42019-06-25 10:29:10 +09007450 if (ret < 0)
7451 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007452 local_irq_disable();
7453 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007454 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Tom Zanussia35873a2019-02-13 17:42:45 -06007455 update_max_tr(tr, current, smp_processor_id(), NULL);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007456 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05007457 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007458 local_irq_enable();
7459 break;
7460 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05007461 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007462 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7463 tracing_reset_online_cpus(&tr->max_buffer);
7464 else
Steven Rostedt (VMware)a47b53e2019-08-13 12:14:35 -04007465 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05007466 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007467 break;
7468 }
7469
7470 if (ret >= 0) {
7471 *ppos += cnt;
7472 ret = cnt;
7473 }
7474out:
7475 mutex_unlock(&trace_types_lock);
7476 return ret;
7477}
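/*
 * Usage sketch for the snapshot file, matching the switch statement above
 * (path assumes the default tracefs mount point):
 *
 *	echo 0 > /sys/kernel/tracing/snapshot	- free the snapshot buffer
 *	echo 1 > /sys/kernel/tracing/snapshot	- allocate (if needed) and swap
 *	echo 2 > /sys/kernel/tracing/snapshot	- clear the snapshot contents
 */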
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007478
7479static int tracing_snapshot_release(struct inode *inode, struct file *file)
7480{
7481 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007482 int ret;
7483
7484 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007485
7486 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04007487 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007488
7489 /* If write only, the seq_file is just a stub */
7490 if (m)
7491 kfree(m->private);
7492 kfree(m);
7493
7494 return 0;
7495}
7496
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007497static int tracing_buffers_open(struct inode *inode, struct file *filp);
7498static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7499 size_t count, loff_t *ppos);
7500static int tracing_buffers_release(struct inode *inode, struct file *file);
7501static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7502 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7503
7504static int snapshot_raw_open(struct inode *inode, struct file *filp)
7505{
7506 struct ftrace_buffer_info *info;
7507 int ret;
7508
Steven Rostedt (VMware)17911ff2019-10-11 17:22:50 -04007509 /* The following checks for tracefs lockdown */
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007510 ret = tracing_buffers_open(inode, filp);
7511 if (ret < 0)
7512 return ret;
7513
7514 info = filp->private_data;
7515
7516 if (info->iter.trace->use_max_tr) {
7517 tracing_buffers_release(inode, filp);
7518 return -EBUSY;
7519 }
7520
7521 info->iter.snapshot = true;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007522 info->iter.array_buffer = &info->iter.tr->max_buffer;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007523
7524 return ret;
7525}
7526
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007527#endif /* CONFIG_TRACER_SNAPSHOT */
7528
7529
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04007530static const struct file_operations tracing_thresh_fops = {
7531 .open = tracing_open_generic,
7532 .read = tracing_thresh_read,
7533 .write = tracing_thresh_write,
7534 .llseek = generic_file_llseek,
7535};
7536
Steven Rostedt (Red Hat)f971cc92016-09-07 12:45:09 -04007537#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007538static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007539 .open = tracing_open_generic,
7540 .read = tracing_max_lat_read,
7541 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007542 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007543};
Chen Gange428abb2015-11-10 05:15:15 +08007544#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007545
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007546static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007547 .open = tracing_open_generic,
7548 .read = tracing_set_trace_read,
7549 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007550 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007551};
7552
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007553static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007554 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02007555 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007556 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02007557 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02007558 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007559 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02007560};
7561
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007562static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007563 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02007564 .read = tracing_entries_read,
7565 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02007566 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02007567 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02007568};
7569
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007570static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007571 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007572 .read = tracing_total_entries_read,
7573 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007574 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07007575};
7576
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007577static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007578 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07007579 .write = tracing_free_buffer_write,
7580 .release = tracing_free_buffer_release,
7581};
7582
Steven Rostedt5e2336a2009-03-05 21:44:55 -05007583static const struct file_operations tracing_mark_fops = {
John Keeping2972e302021-12-07 14:25:58 +00007584 .open = tracing_mark_open,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007585 .write = tracing_mark_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007586 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03007587};
7588
Steven Rostedtfa32e852016-07-06 15:25:08 -04007589static const struct file_operations tracing_mark_raw_fops = {
John Keeping2972e302021-12-07 14:25:58 +00007590 .open = tracing_mark_open,
Steven Rostedtfa32e852016-07-06 15:25:08 -04007591 .write = tracing_mark_raw_write,
Steven Rostedtfa32e852016-07-06 15:25:08 -04007592 .release = tracing_release_generic_tr,
7593};
7594
Zhaolei5079f322009-08-25 16:12:56 +08007595static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08007596 .open = tracing_clock_open,
7597 .read = seq_read,
7598 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007599 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08007600 .write = tracing_clock_write,
7601};
7602
Tom Zanussi2c1ea602018-01-15 20:51:41 -06007603static const struct file_operations trace_time_stamp_mode_fops = {
7604 .open = tracing_time_stamp_mode_open,
7605 .read = seq_read,
7606 .llseek = seq_lseek,
7607 .release = tracing_single_release_tr,
7608};
7609
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007610#ifdef CONFIG_TRACER_SNAPSHOT
7611static const struct file_operations snapshot_fops = {
7612 .open = tracing_snapshot_open,
7613 .read = seq_read,
7614 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05007615 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007616 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007617};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09007618
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007619static const struct file_operations snapshot_raw_fops = {
7620 .open = snapshot_raw_open,
7621 .read = tracing_buffers_read,
7622 .release = tracing_buffers_release,
7623 .splice_read = tracing_buffers_splice_read,
7624 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05007625};
7626
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05007627#endif /* CONFIG_TRACER_SNAPSHOT */
7628
Daniel Bristot de Oliveirabc87cf0a2021-06-22 16:42:23 +02007629/*
7630 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7631 * @filp: The active open file structure
7632 * @ubuf: The userspace provided buffer containing the value to be written
7633 * @cnt: The number of bytes available in @ubuf
7634 * @ppos: The current "file" position
7635 *
7636 * This function implements the write interface for a struct trace_min_max_param.
7637 * The filp->private_data must point to a trace_min_max_param structure that
7638 * defines where to write the value, the min and the max acceptable values,
7639 * and a lock to protect the write.
7640 */
7641static ssize_t
7642trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7643{
7644 struct trace_min_max_param *param = filp->private_data;
7645 u64 val;
7646 int err;
7647
7648 if (!param)
7649 return -EFAULT;
7650
7651 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7652 if (err)
7653 return err;
7654
7655 if (param->lock)
7656 mutex_lock(param->lock);
7657
7658 if (param->min && val < *param->min)
7659 err = -EINVAL;
7660
7661 if (param->max && val > *param->max)
7662 err = -EINVAL;
7663
7664 if (!err)
7665 *param->val = val;
7666
7667 if (param->lock)
7668 mutex_unlock(param->lock);
7669
7670 if (err)
7671 return err;
7672
7673 return cnt;
7674}
7675
7676/*
7677 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7678 * @filp: The active open file structure
7679 * @ubuf: The userspace provided buffer to read value into
7680 * @cnt: The maximum number of bytes to read
7681 * @ppos: The current "file" position
7682 *
7683 * This function implements the read interface for a struct trace_min_max_param.
7684 * The filp->private_data must point to a trace_min_max_param struct with valid
7685 * data.
7686 */
7687static ssize_t
7688trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7689{
7690 struct trace_min_max_param *param = filp->private_data;
7691 char buf[U64_STR_SIZE];
7692 int len;
7693 u64 val;
7694
7695 if (!param)
7696 return -EFAULT;
7697
7698 val = *param->val;
7699
7700 if (cnt > sizeof(buf))
7701 cnt = sizeof(buf);
7702
7703 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7704
7705 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7706}
7707
7708const struct file_operations trace_min_max_fops = {
7709 .open = tracing_open_generic,
7710 .read = trace_min_max_read,
7711 .write = trace_min_max_write,
7712};
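/*
 * Sketch of how trace_min_max_fops is typically wired up (illustrative
 * only; "example_*" and "parent" are made-up names, and the field names
 * assume the trace_min_max_param layout declared in trace.h):
 *
 *	static u64 example_val, example_min = 1, example_max = 100;
 *
 *	static struct trace_min_max_param example_param = {
 *		.lock	= NULL,
 *		.val	= &example_val,
 *		.min	= &example_min,
 *		.max	= &example_max,
 *	};
 *
 *	tracefs_create_file("example", 0644, parent, &example_param,
 *			    &trace_min_max_fops);
 */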
7713
Tom Zanussi8a062902019-03-31 18:48:15 -05007714#define TRACING_LOG_ERRS_MAX 8
7715#define TRACING_LOG_LOC_MAX 128
7716
7717#define CMD_PREFIX " Command: "
7718
7719struct err_info {
7720 const char **errs; /* ptr to loc-specific array of err strings */
7721 u8 type; /* index into errs -> specific err string */
7722	u8		pos;	/* caret position in cmd; MAX_FILTER_STR_VAL = 256 fits in u8 */
7723 u64 ts;
7724};
7725
7726struct tracing_log_err {
7727 struct list_head list;
7728 struct err_info info;
7729 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7730 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7731};
7732
Tom Zanussi8a062902019-03-31 18:48:15 -05007733static DEFINE_MUTEX(tracing_err_log_lock);
7734
YueHaibingff585c52019-06-14 23:32:10 +08007735static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05007736{
7737 struct tracing_log_err *err;
7738
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007739 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007740 err = kzalloc(sizeof(*err), GFP_KERNEL);
7741 if (!err)
7742 err = ERR_PTR(-ENOMEM);
Tom Zanussi67ab5eb2022-01-27 15:44:18 -06007743 else
7744 tr->n_err_log_entries++;
Tom Zanussi8a062902019-03-31 18:48:15 -05007745
7746 return err;
7747 }
7748
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007749 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
Tom Zanussi8a062902019-03-31 18:48:15 -05007750 list_del(&err->list);
7751
7752 return err;
7753}
7754
7755/**
7756 * err_pos - find the position of a string within a command for error careting
7757 * @cmd: The tracing command that caused the error
7758 * @str: The string to position the caret at within @cmd
7759 *
Ingo Molnarf2cc0202021-03-23 18:49:35 +01007760 * Finds the position of the first occurrence of @str within @cmd. The
Tom Zanussi8a062902019-03-31 18:48:15 -05007761 * return value can be passed to tracing_log_err() for caret placement
7762 * within @cmd.
7763 *
Ingo Molnarf2cc0202021-03-23 18:49:35 +01007764 * Returns the index within @cmd of the first occurrence of @str or 0
Tom Zanussi8a062902019-03-31 18:48:15 -05007765 * if @str was not found.
7766 */
7767unsigned int err_pos(char *cmd, const char *str)
7768{
7769 char *found;
7770
7771 if (WARN_ON(!strlen(cmd)))
7772 return 0;
7773
7774 found = strstr(cmd, str);
7775 if (found)
7776 return found - cmd;
7777
7778 return 0;
7779}
7780
7781/**
7782 * tracing_log_err - write an error to the tracing error log
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007783 * @tr: The associated trace array for the error (NULL for top level array)
Tom Zanussi8a062902019-03-31 18:48:15 -05007784 * @loc: A string describing where the error occurred
7785 * @cmd: The tracing command that caused the error
7786 * @errs: The array of loc-specific static error strings
7787 * @type: The index into errs[], which produces the specific static err string
7788 * @pos: The position the caret should be placed in the cmd
7789 *
7790 * Writes an error into tracing/error_log of the form:
7791 *
7792 * <loc>: error: <text>
7793 * Command: <cmd>
7794 * ^
7795 *
7796 * tracing/error_log is a small log file containing the last
7797 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7798 * unless there has been a tracing error, and the error log can be
7799 * cleared and have its memory freed by writing the empty string in
7800 * truncation mode to it i.e. echo > tracing/error_log.
7801 *
7802 * NOTE: the @errs array along with the @type param are used to
7803 * produce a static error string - this string is not copied and saved
7804 * when the error is logged - only a pointer to it is saved. See
7805 * existing callers for examples of how static strings are typically
7806 * defined for use with tracing_log_err().
7807 */
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007808void tracing_log_err(struct trace_array *tr,
7809 const char *loc, const char *cmd,
Tom Zanussi8a062902019-03-31 18:48:15 -05007810 const char **errs, u8 type, u8 pos)
7811{
7812 struct tracing_log_err *err;
7813
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007814 if (!tr)
7815 tr = &global_trace;
7816
Tom Zanussi8a062902019-03-31 18:48:15 -05007817 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007818 err = get_tracing_log_err(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007819 if (PTR_ERR(err) == -ENOMEM) {
7820 mutex_unlock(&tracing_err_log_lock);
7821 return;
7822 }
7823
7824 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7825	snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7826
7827 err->info.errs = errs;
7828 err->info.type = type;
7829 err->info.pos = pos;
7830 err->info.ts = local_clock();
7831
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007832 list_add_tail(&err->list, &tr->err_log);
Tom Zanussi8a062902019-03-31 18:48:15 -05007833 mutex_unlock(&tracing_err_log_lock);
7834}
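/*
 * Hypothetical caller, sketched because the kernel-doc above points at
 * "existing callers" for the usual pattern (the strings, the "hist"
 * location and the command below are made up):
 *
 *	static const char *example_errs[] = { "Duplicate key", "Missing value" };
 *
 *	tracing_log_err(tr, "hist", cmd, example_errs, 0,
 *			err_pos(cmd, "keys="));
 */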
7835
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007836static void clear_tracing_err_log(struct trace_array *tr)
Tom Zanussi8a062902019-03-31 18:48:15 -05007837{
7838 struct tracing_log_err *err, *next;
7839
7840 mutex_lock(&tracing_err_log_lock);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007841 list_for_each_entry_safe(err, next, &tr->err_log, list) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007842 list_del(&err->list);
7843 kfree(err);
7844 }
7845
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007846 tr->n_err_log_entries = 0;
Tom Zanussi8a062902019-03-31 18:48:15 -05007847 mutex_unlock(&tracing_err_log_lock);
7848}
7849
7850static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7851{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007852 struct trace_array *tr = m->private;
7853
Tom Zanussi8a062902019-03-31 18:48:15 -05007854 mutex_lock(&tracing_err_log_lock);
7855
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007856 return seq_list_start(&tr->err_log, *pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007857}
7858
7859static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7860{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007861 struct trace_array *tr = m->private;
7862
7863 return seq_list_next(v, &tr->err_log, pos);
Tom Zanussi8a062902019-03-31 18:48:15 -05007864}
7865
7866static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7867{
7868 mutex_unlock(&tracing_err_log_lock);
7869}
7870
7871static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7872{
7873 u8 i;
7874
7875 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7876 seq_putc(m, ' ');
7877 for (i = 0; i < pos; i++)
7878 seq_putc(m, ' ');
7879 seq_puts(m, "^\n");
7880}
7881
7882static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7883{
7884 struct tracing_log_err *err = v;
7885
7886 if (err) {
7887 const char *err_text = err->info.errs[err->info.type];
7888 u64 sec = err->info.ts;
7889 u32 nsec;
7890
7891 nsec = do_div(sec, NSEC_PER_SEC);
7892 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7893 err->loc, err_text);
7894 seq_printf(m, "%s", err->cmd);
7895 tracing_err_log_show_pos(m, err->info.pos);
7896 }
7897
7898 return 0;
7899}
7900
7901static const struct seq_operations tracing_err_log_seq_ops = {
7902 .start = tracing_err_log_seq_start,
7903 .next = tracing_err_log_seq_next,
7904 .stop = tracing_err_log_seq_stop,
7905 .show = tracing_err_log_seq_show
7906};
7907
7908static int tracing_err_log_open(struct inode *inode, struct file *file)
7909{
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007910 struct trace_array *tr = inode->i_private;
Tom Zanussi8a062902019-03-31 18:48:15 -05007911 int ret = 0;
7912
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007913 ret = tracing_check_open_get_tr(tr);
7914 if (ret)
7915 return ret;
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007916
Tom Zanussi8a062902019-03-31 18:48:15 -05007917 /* If this file was opened for write, then erase contents */
7918 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007919 clear_tracing_err_log(tr);
Tom Zanussi8a062902019-03-31 18:48:15 -05007920
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007921 if (file->f_mode & FMODE_READ) {
Tom Zanussi8a062902019-03-31 18:48:15 -05007922 ret = seq_open(file, &tracing_err_log_seq_ops);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04007923 if (!ret) {
7924 struct seq_file *m = file->private_data;
7925 m->private = tr;
7926 } else {
7927 trace_array_put(tr);
7928 }
7929 }
Tom Zanussi8a062902019-03-31 18:48:15 -05007930 return ret;
7931}
7932
7933static ssize_t tracing_err_log_write(struct file *file,
7934 const char __user *buffer,
7935 size_t count, loff_t *ppos)
7936{
7937 return count;
7938}
7939
Takeshi Misawad122ed62019-06-28 19:56:40 +09007940static int tracing_err_log_release(struct inode *inode, struct file *file)
7941{
7942 struct trace_array *tr = inode->i_private;
7943
7944 trace_array_put(tr);
7945
7946 if (file->f_mode & FMODE_READ)
7947 seq_release(inode, file);
7948
7949 return 0;
7950}
7951
Tom Zanussi8a062902019-03-31 18:48:15 -05007952static const struct file_operations tracing_err_log_fops = {
7953 .open = tracing_err_log_open,
7954 .write = tracing_err_log_write,
7955 .read = seq_read,
7956 .llseek = seq_lseek,
Takeshi Misawad122ed62019-06-28 19:56:40 +09007957 .release = tracing_err_log_release,
Tom Zanussi8a062902019-03-31 18:48:15 -05007958};
7959
Steven Rostedt2cadf912008-12-01 22:20:19 -05007960static int tracing_buffers_open(struct inode *inode, struct file *filp)
7961{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007962 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007963 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007964 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007965
Steven Rostedt (VMware)8530dec2019-10-11 17:39:57 -04007966 ret = tracing_check_open_get_tr(tr);
7967 if (ret)
7968 return ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007969
Zhaoyang Huang0f69dae2020-07-31 08:27:45 +08007970 info = kvzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007971 if (!info) {
7972 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05007973 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007974 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05007975
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007976 mutex_lock(&trace_types_lock);
7977
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007978 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02007979 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05007980 info->iter.trace = tr->current_trace;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05007981 info->iter.array_buffer = &tr->array_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007982 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007983 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05007984 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007985
7986 filp->private_data = info;
7987
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04007988 tr->trace_ref++;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05007989
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05007990 mutex_unlock(&trace_types_lock);
7991
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04007992 ret = nonseekable_open(inode, filp);
7993 if (ret < 0)
7994 trace_array_put(tr);
7995
7996 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05007997}
7998
Al Viro9dd95742017-07-03 00:42:43 -04007999static __poll_t
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008000tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8001{
8002 struct ftrace_buffer_info *info = filp->private_data;
8003 struct trace_iterator *iter = &info->iter;
8004
8005 return trace_poll(iter, filp, poll_table);
8006}
8007
Steven Rostedt2cadf912008-12-01 22:20:19 -05008008static ssize_t
8009tracing_buffers_read(struct file *filp, char __user *ubuf,
8010 size_t count, loff_t *ppos)
8011{
8012 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008013 struct trace_iterator *iter = &info->iter;
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04008014 ssize_t ret = 0;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008015 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008016
Steven Rostedt2dc5d122009-03-04 19:10:05 -05008017 if (!count)
8018 return 0;
8019
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008020#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008021 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8022 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008023#endif
8024
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04008025 if (!info->spare) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008026 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008027 iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04008028 if (IS_ERR(info->spare)) {
8029 ret = PTR_ERR(info->spare);
8030 info->spare = NULL;
8031 } else {
8032 info->spare_cpu = iter->cpu_file;
8033 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04008034 }
Lai Jiangshanddd538f2009-04-02 15:16:59 +08008035 if (!info->spare)
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04008036 return ret;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08008037
Steven Rostedt2cadf912008-12-01 22:20:19 -05008038 /* Do we have previous read data to read? */
8039 if (info->read < PAGE_SIZE)
8040 goto read;
8041
Steven Rostedtb6273442013-02-28 13:44:11 -05008042 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008043 trace_access_lock(iter->cpu_file);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008044 ret = ring_buffer_read_page(iter->array_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05008045 &info->spare,
8046 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008047 iter->cpu_file, 0);
8048 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05008049
8050 if (ret < 0) {
8051 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008052 if ((filp->f_flags & O_NONBLOCK))
8053 return -EAGAIN;
8054
Steven Rostedt (VMware)2c2b0a72018-11-29 20:32:26 -05008055 ret = wait_on_pipe(iter, 0);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008056 if (ret)
8057 return ret;
8058
Steven Rostedtb6273442013-02-28 13:44:11 -05008059 goto again;
8060 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008061 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05008062 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05008063
Steven Rostedt436fc282011-10-14 10:44:25 -04008064 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05008065 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05008066 size = PAGE_SIZE - info->read;
8067 if (size > count)
8068 size = count;
8069
8070 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008071 if (ret == size)
8072 return -EFAULT;
8073
Steven Rostedt2dc5d122009-03-04 19:10:05 -05008074 size -= ret;
8075
Steven Rostedt2cadf912008-12-01 22:20:19 -05008076 *ppos += size;
8077 info->read += size;
8078
8079 return size;
8080}
8081
8082static int tracing_buffers_release(struct inode *inode, struct file *file)
8083{
8084 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008085 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008086
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05008087 mutex_lock(&trace_types_lock);
8088
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04008089 iter->tr->trace_ref--;
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05008090
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04008091 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05008092
Lai Jiangshanddd538f2009-04-02 15:16:59 +08008093 if (info->spare)
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008094 ring_buffer_free_read_page(iter->array_buffer->buffer,
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04008095 info->spare_cpu, info->spare);
Zhaoyang Huang0f69dae2020-07-31 08:27:45 +08008096 kvfree(info);
Steven Rostedt2cadf912008-12-01 22:20:19 -05008097
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05008098 mutex_unlock(&trace_types_lock);
8099
Steven Rostedt2cadf912008-12-01 22:20:19 -05008100 return 0;
8101}
8102
8103struct buffer_ref {
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05008104 struct trace_buffer *buffer;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008105 void *page;
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04008106 int cpu;
Jann Hornb9872222019-04-04 23:59:25 +02008107 refcount_t refcount;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008108};
8109
Jann Hornb9872222019-04-04 23:59:25 +02008110static void buffer_ref_release(struct buffer_ref *ref)
8111{
8112 if (!refcount_dec_and_test(&ref->refcount))
8113 return;
8114 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8115 kfree(ref);
8116}
8117
Steven Rostedt2cadf912008-12-01 22:20:19 -05008118static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8119 struct pipe_buffer *buf)
8120{
8121 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8122
Jann Hornb9872222019-04-04 23:59:25 +02008123 buffer_ref_release(ref);
Steven Rostedt2cadf912008-12-01 22:20:19 -05008124 buf->private = 0;
8125}
8126
Matthew Wilcox15fab632019-04-05 14:02:10 -07008127static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
Steven Rostedt2cadf912008-12-01 22:20:19 -05008128 struct pipe_buffer *buf)
8129{
8130 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8131
Linus Torvaldse9e1a2e2019-04-26 11:09:55 -07008132 if (refcount_read(&ref->refcount) > INT_MAX/2)
Matthew Wilcox15fab632019-04-05 14:02:10 -07008133 return false;
8134
Jann Hornb9872222019-04-04 23:59:25 +02008135 refcount_inc(&ref->refcount);
Matthew Wilcox15fab632019-04-05 14:02:10 -07008136 return true;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008137}
8138
8139/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08008140static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05008141 .release = buffer_pipe_buf_release,
Steven Rostedt2cadf912008-12-01 22:20:19 -05008142 .get = buffer_pipe_buf_get,
8143};
8144
8145/*
8146 * Callback from splice_to_pipe(); releases the pages still held in the
8147 * spd if we errored out while filling the pipe.
8148 */
8149static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8150{
8151 struct buffer_ref *ref =
8152 (struct buffer_ref *)spd->partial[i].private;
8153
Jann Hornb9872222019-04-04 23:59:25 +02008154 buffer_ref_release(ref);
Steven Rostedt2cadf912008-12-01 22:20:19 -05008155 spd->partial[i].private = 0;
8156}
8157
8158static ssize_t
8159tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8160 struct pipe_inode_info *pipe, size_t len,
8161 unsigned int flags)
8162{
8163 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008164 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02008165 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8166 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05008167 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02008168 .pages = pages_def,
8169 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02008170 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05008171 .ops = &buffer_pipe_buf_ops,
8172 .spd_release = buffer_spd_release,
8173 };
8174 struct buffer_ref *ref;
Steven Rostedt (VMware)6b7e6332017-12-22 20:38:57 -05008175 int entries, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01008176 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008177
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008178#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008179 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8180 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008181#endif
8182
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008183 if (*ppos & (PAGE_SIZE - 1))
8184 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08008185
8186 if (len & (PAGE_SIZE - 1)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008187 if (len < PAGE_SIZE)
8188 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08008189 len &= PAGE_MASK;
8190 }
8191
Al Viro1ae22932016-09-17 18:31:46 -04008192 if (splice_grow_spd(pipe, &spd))
8193 return -ENOMEM;
8194
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008195 again:
8196 trace_access_lock(iter->cpu_file);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008197 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04008198
Al Viroa786c062014-04-11 12:01:03 -04008199 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05008200 struct page *page;
8201 int r;
8202
8203 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01008204 if (!ref) {
8205 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008206 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01008207 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05008208
Jann Hornb9872222019-04-04 23:59:25 +02008209 refcount_set(&ref->refcount, 1);
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008210 ref->buffer = iter->array_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008211 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt (VMware)a7e52ad2017-08-02 14:20:54 -04008212 if (IS_ERR(ref->page)) {
8213 ret = PTR_ERR(ref->page);
8214 ref->page = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008215 kfree(ref);
8216 break;
8217 }
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04008218 ref->cpu = iter->cpu_file;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008219
8220 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008221 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05008222 if (r < 0) {
Steven Rostedt (VMware)73a757e2017-05-01 09:35:09 -04008223 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8224 ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05008225 kfree(ref);
8226 break;
8227 }
8228
Steven Rostedt2cadf912008-12-01 22:20:19 -05008229 page = virt_to_page(ref->page);
8230
8231 spd.pages[i] = page;
8232 spd.partial[i].len = PAGE_SIZE;
8233 spd.partial[i].offset = 0;
8234 spd.partial[i].private = (unsigned long)ref;
8235 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08008236 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04008237
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008238 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05008239 }
8240
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008241 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05008242 spd.nr_pages = i;
8243
8244 /* did we read anything? */
8245 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01008246 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04008247 goto out;
Rabin Vincent07906da2014-11-06 22:26:07 +01008248
Al Viro1ae22932016-09-17 18:31:46 -04008249 ret = -EAGAIN;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008250 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
Al Viro1ae22932016-09-17 18:31:46 -04008251 goto out;
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05008252
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05008253 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04008254 if (ret)
Al Viro1ae22932016-09-17 18:31:46 -04008255 goto out;
Rabin Vincente30f53a2014-11-10 19:46:34 +01008256
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008257 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05008258 }
8259
8260 ret = splice_to_pipe(pipe, &spd);
Al Viro1ae22932016-09-17 18:31:46 -04008261out:
Eric Dumazet047fe362012-06-12 15:24:40 +02008262 splice_shrink_spd(&spd);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008263
Steven Rostedt2cadf912008-12-01 22:20:19 -05008264 return ret;
8265}
8266
8267static const struct file_operations tracing_buffers_fops = {
8268 .open = tracing_buffers_open,
8269 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05008270 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05008271 .release = tracing_buffers_release,
8272 .splice_read = tracing_buffers_splice_read,
8273 .llseek = no_llseek,
8274};
8275
Steven Rostedtc8d77182009-04-29 18:03:45 -04008276static ssize_t
8277tracing_stats_read(struct file *filp, char __user *ubuf,
8278 size_t count, loff_t *ppos)
8279{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02008280 struct inode *inode = file_inode(filp);
8281 struct trace_array *tr = inode->i_private;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05008282 struct array_buffer *trace_buf = &tr->array_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02008283 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008284 struct trace_seq *s;
8285 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07008286 unsigned long long t;
8287 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04008288
Li Zefane4f2d102009-06-15 10:57:28 +08008289 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008290 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01008291 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04008292
8293 trace_seq_init(s);
8294
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008295 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008296 trace_seq_printf(s, "entries: %ld\n", cnt);
8297
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008298 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008299 trace_seq_printf(s, "overrun: %ld\n", cnt);
8300
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008301 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008302 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8303
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008304 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07008305 trace_seq_printf(s, "bytes: %ld\n", cnt);
8306
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09008307 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08008308 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008309 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08008310 usec_rem = do_div(t, USEC_PER_SEC);
8311 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8312 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07008313
Yordan Karadzhov (VMware)f3ef7202021-03-29 16:03:31 +03008314 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08008315 usec_rem = do_div(t, USEC_PER_SEC);
8316 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8317 } else {
8318 /* counter or tsc mode for trace_clock */
8319 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008320 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08008321
8322 trace_seq_printf(s, "now ts: %llu\n",
Yordan Karadzhov (VMware)f3ef7202021-03-29 16:03:31 +03008323 ring_buffer_time_stamp(trace_buf->buffer));
Yoshihiro YUNOMAE11043d8b2012-11-13 12:18:23 -08008324 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07008325
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008326 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07008327 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8328
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05008329 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05008330 trace_seq_printf(s, "read events: %ld\n", cnt);
8331
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05008332 count = simple_read_from_buffer(ubuf, count, ppos,
8333 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04008334
8335 kfree(s);
8336
8337 return count;
8338}
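/*
 * Illustrative note (not part of the original file): the trace_seq_printf()
 * format strings above define the layout of the per-CPU "stats" file, so a
 * read of e.g. /sys/kernel/tracing/per_cpu/cpu0/stats produces output of
 * the following shape (values are made up):
 *
 *   entries: 1024
 *   overrun: 0
 *   commit overrun: 0
 *   bytes: 65536
 *   oldest event ts:  5693.961481
 *   now ts:  5720.196324
 *   dropped events: 0
 *   read events: 128
 */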
8339
8340static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02008341 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04008342 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008343 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02008344 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04008345};
8346
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008347#ifdef CONFIG_DYNAMIC_FTRACE
8348
8349static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04008350tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008351 size_t cnt, loff_t *ppos)
8352{
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04008353 ssize_t ret;
8354 char *buf;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008355 int r;
8356
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04008357 /* 256 should be plenty to hold the amount needed */
8358 buf = kmalloc(256, GFP_KERNEL);
8359 if (!buf)
8360 return -ENOMEM;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04008361
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04008362 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8363 ftrace_update_tot_cnt,
8364 ftrace_number_of_pages,
8365 ftrace_number_of_groups);
8366
8367 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8368 kfree(buf);
8369 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008370}
8371
Steven Rostedt5e2336a2009-03-05 21:44:55 -05008372static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02008373 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04008374 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008375 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008376};
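/*
 * Illustrative note (not part of the original file): given the scnprintf()
 * format above, reading the dynamic-ftrace info file backed by these fops
 * yields a single line of the form (numbers made up):
 *
 *   85942 pages:623 groups: 38
 */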
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008377#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008378
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008379#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8380static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04008381ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04008382 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008383 void *data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008384{
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04008385 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008386}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008387
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008388static void
Steven Rostedt (VMware)bca6c8d2017-04-03 18:18:47 -04008389ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04008390 struct trace_array *tr, struct ftrace_probe_ops *ops,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008391 void *data)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008392{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008393 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008394 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008395
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008396 if (mapper)
8397 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008398
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008399 if (count) {
8400
8401 if (*count <= 0)
8402 return;
8403
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008404 (*count)--;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008405 }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008406
Steven Rostedt (VMware)cab50372017-04-20 11:34:06 -04008407 tracing_snapshot_instance(tr);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008408}
8409
8410static int
8411ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8412 struct ftrace_probe_ops *ops, void *data)
8413{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008414 struct ftrace_func_mapper *mapper = data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008415 long *count = NULL;
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008416
8417 seq_printf(m, "%ps:", (void *)ip);
8418
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01008419 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008420
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008421 if (mapper)
8422 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8423
8424 if (count)
8425 seq_printf(m, ":count=%ld\n", *count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008426 else
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008427 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008428
8429 return 0;
8430}
8431
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008432static int
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04008433ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008434 unsigned long ip, void *init_data, void **data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008435{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008436 struct ftrace_func_mapper *mapper = *data;
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008437
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008438 if (!mapper) {
8439 mapper = allocate_ftrace_func_mapper();
8440 if (!mapper)
8441 return -ENOMEM;
8442 *data = mapper;
8443 }
8444
8445 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008446}
8447
8448static void
Steven Rostedt (VMware)b5f081b2017-04-10 22:30:05 -04008449ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008450 unsigned long ip, void *data)
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008451{
Steven Rostedt (VMware)6e444312017-04-19 22:39:44 -04008452 struct ftrace_func_mapper *mapper = data;
8453
8454 if (!ip) {
8455 if (!mapper)
8456 return;
8457 free_ftrace_func_mapper(mapper, NULL);
8458 return;
8459 }
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008460
8461 ftrace_func_mapper_remove_ip(mapper, ip);
8462}
8463
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008464static struct ftrace_probe_ops snapshot_probe_ops = {
8465 .func = ftrace_snapshot,
8466 .print = ftrace_snapshot_print,
8467};
8468
8469static struct ftrace_probe_ops snapshot_count_probe_ops = {
8470 .func = ftrace_count_snapshot,
8471 .print = ftrace_snapshot_print,
Steven Rostedt (VMware)1a93f8b2017-04-03 22:09:43 -04008472 .init = ftrace_snapshot_init,
8473 .free = ftrace_snapshot_free,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008474};
8475
8476static int
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04008477ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008478 char *glob, char *cmd, char *param, int enable)
8479{
8480 struct ftrace_probe_ops *ops;
8481 void *count = (void *)-1;
8482 char *number;
8483 int ret;
8484
Steven Rostedt (VMware)0f179762017-06-29 10:05:45 -04008485 if (!tr)
8486 return -ENODEV;
8487
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008488 /* hash funcs only work with set_ftrace_filter */
8489 if (!enable)
8490 return -EINVAL;
8491
8492 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8493
Steven Rostedt (VMware)d3d532d2017-04-04 16:44:43 -04008494 if (glob[0] == '!')
Steven Rostedt (VMware)7b60f3d2017-04-18 14:50:39 -04008495 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008496
8497 if (!param)
8498 goto out_reg;
8499
8500 number = strsep(&param, ":");
8501
8502 if (!strlen(number))
8503 goto out_reg;
8504
8505 /*
8506 * We use the callback data field (which is a pointer)
8507 * as our counter.
8508 */
8509 ret = kstrtoul(number, 0, (unsigned long *)&count);
8510 if (ret)
8511 return ret;
8512
8513 out_reg:
Steven Rostedt (VMware)2824f502018-05-28 10:56:36 -04008514 ret = tracing_alloc_snapshot_instance(tr);
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04008515 if (ret < 0)
8516 goto out;
8517
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04008518 ret = register_ftrace_function_probe(glob, tr, ops, count);
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008519
Steven Rostedt (VMware)df62db52017-04-19 12:07:08 -04008520 out:
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008521 return ret < 0 ? ret : 0;
8522}
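/*
 * Illustrative usage of the "snapshot" command parsed by the callback above
 * (not part of the original file); "schedule" is just an example, any
 * traceable function works:
 *
 *   # take a snapshot every time schedule() is hit
 *   echo 'schedule:snapshot' > /sys/kernel/tracing/set_ftrace_filter
 *
 *   # only snapshot on the first 3 hits (":3" becomes the count above)
 *   echo 'schedule:snapshot:3' > /sys/kernel/tracing/set_ftrace_filter
 *
 *   # remove the probe again (the leading '!' unregisters it)
 *   echo '!schedule:snapshot' > /sys/kernel/tracing/set_ftrace_filter
 */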
8523
8524static struct ftrace_func_command ftrace_snapshot_cmd = {
8525 .name = "snapshot",
8526 .func = ftrace_trace_snapshot_callback,
8527};
8528
Tom Zanussi38de93a2013-10-24 08:34:18 -05008529static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008530{
8531 return register_ftrace_command(&ftrace_snapshot_cmd);
8532}
8533#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05008534static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04008535#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008536
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008537static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008538{
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008539 if (WARN_ON(!tr->dir))
8540 return ERR_PTR(-ENODEV);
8541
8542 /* Top directory uses NULL as the parent */
8543 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8544 return NULL;
8545
8546 /* All sub buffers have a descriptor */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008547 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02008548}
8549
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008550static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8551{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008552 struct dentry *d_tracer;
8553
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008554 if (tr->percpu_dir)
8555 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008556
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008557 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05008558 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008559 return NULL;
8560
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008561 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008562
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008563 MEM_FAIL(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008564 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008565
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008566 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008567}
8568
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008569static struct dentry *
8570trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8571 void *data, long cpu, const struct file_operations *fops)
8572{
8573 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8574
8575 if (ret) /* See tracing_get_cpu() */
David Howells7682c912015-03-17 22:26:16 +00008576 d_inode(ret)->i_cdev = (void *)(cpu + 1);
Oleg Nesterov649e9c702013-07-23 17:25:54 +02008577 return ret;
8578}
8579
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008580static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008581tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008582{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008583 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008584 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04008585 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008586
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09008587 if (!d_percpu)
8588 return;
8589
Steven Rostedtdd49a382010-10-20 21:51:26 -04008590 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008591 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008592 if (!d_cpu) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07008593 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008594 return;
8595 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008596
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01008597 /* per cpu trace_pipe */
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04008598 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02008599 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008600
8601 /* per cpu trace */
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04008602 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008603 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04008604
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04008605 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02008606 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04008607
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04008608 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02008609 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08008610
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04008611 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02008612 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05008613
8614#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04008615 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02008616 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05008617
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04008618 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02008619 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05008620#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01008621}
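/*
 * Illustrative note (not part of the original file): after the calls above,
 * each CPU ends up with a directory such as /sys/kernel/tracing/per_cpu/cpu0/
 * containing trace_pipe, trace, trace_pipe_raw, stats and buffer_size_kb,
 * plus snapshot and snapshot_raw when CONFIG_TRACER_SNAPSHOT is enabled.
 */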
8622
Steven Rostedt60a11772008-05-12 21:20:44 +02008623#ifdef CONFIG_FTRACE_SELFTEST
8624/* Let selftest have access to static functions in this file */
8625#include "trace_selftest.c"
8626#endif
8627
Steven Rostedt577b7852009-02-26 23:43:05 -05008628static ssize_t
8629trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8630 loff_t *ppos)
8631{
8632 struct trace_option_dentry *topt = filp->private_data;
8633 char *buf;
8634
8635 if (topt->flags->val & topt->opt->bit)
8636 buf = "1\n";
8637 else
8638 buf = "0\n";
8639
8640 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8641}
8642
8643static ssize_t
8644trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8645 loff_t *ppos)
8646{
8647 struct trace_option_dentry *topt = filp->private_data;
8648 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05008649 int ret;
8650
Peter Huewe22fe9b52011-06-07 21:58:27 +02008651 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8652 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05008653 return ret;
8654
Li Zefan8d18eaa2009-12-08 11:17:06 +08008655 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05008656 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08008657
8658 if (!!(topt->flags->val & topt->opt->bit) != val) {
8659 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05008660 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05008661 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08008662 mutex_unlock(&trace_types_lock);
8663 if (ret)
8664 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05008665 }
8666
8667 *ppos += cnt;
8668
8669 return cnt;
8670}
8671
8672
8673static const struct file_operations trace_options_fops = {
8674 .open = tracing_open_generic,
8675 .read = trace_options_read,
8676 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008677 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05008678};
8679
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008680/*
8681 * In order to pass in both the trace_array descriptor as well as the index
8682 * to the flag that the trace option file represents, the trace_array
8683 * has a character array of trace_flags_index[], which holds the index
8684 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8685 * The address of this character array is passed to the flag option file
8686 * read/write callbacks.
8687 *
8688 * In order to extract both the index and the trace_array descriptor,
8689 * get_tr_index() uses the following algorithm.
8690 *
8691 * idx = *ptr;
8692 *
8693 * As the pointer itself contains the address of the index (remember
8694 * index[1] == 1).
8695 *
8696 * Then to get the trace_array descriptor, by subtracting that index
8697 * from the ptr, we get to the start of the index itself.
8698 *
8699 * ptr - idx == &index[0]
8700 *
8701 * Then a simple container_of() from that pointer gets us to the
8702 * trace_array descriptor.
8703 */
8704static void get_tr_index(void *data, struct trace_array **ptr,
8705 unsigned int *pindex)
8706{
8707 *pindex = *(unsigned char *)data;
8708
8709 *ptr = container_of(data - *pindex, struct trace_array,
8710 trace_flags_index);
8711}
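/*
 * Worked example of the trick above (illustrative only, addresses made up):
 * suppose tr->trace_flags_index[] starts at 0x1000, so index[3] sits at
 * 0x1003 and holds the value 3.  The option file for flag 3 stores 0x1003
 * as its private data, and on a read/write:
 *
 *   idx   = *(unsigned char *)data;   // idx = 3
 *   start = data - idx;               // 0x1003 - 3 == 0x1000 == &index[0]
 *   tr    = container_of(start, struct trace_array, trace_flags_index);
 *
 * which recovers both the flag index and the owning trace_array from the
 * single pointer handed to the file callbacks.
 */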
8712
Steven Rostedta8259072009-02-26 22:19:12 -05008713static ssize_t
8714trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8715 loff_t *ppos)
8716{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008717 void *tr_index = filp->private_data;
8718 struct trace_array *tr;
8719 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05008720 char *buf;
8721
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008722 get_tr_index(tr_index, &tr, &index);
8723
8724 if (tr->trace_flags & (1 << index))
Steven Rostedta8259072009-02-26 22:19:12 -05008725 buf = "1\n";
8726 else
8727 buf = "0\n";
8728
8729 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8730}
8731
8732static ssize_t
8733trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8734 loff_t *ppos)
8735{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008736 void *tr_index = filp->private_data;
8737 struct trace_array *tr;
8738 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05008739 unsigned long val;
8740 int ret;
8741
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008742 get_tr_index(tr_index, &tr, &index);
8743
Peter Huewe22fe9b52011-06-07 21:58:27 +02008744 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8745 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05008746 return ret;
8747
Zhaoleif2d84b62009-08-07 18:55:48 +08008748 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05008749 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008750
Prateek Sood3a53acf2019-12-10 09:15:16 +00008751 mutex_lock(&event_mutex);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008752 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008753 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04008754 mutex_unlock(&trace_types_lock);
Prateek Sood3a53acf2019-12-10 09:15:16 +00008755 mutex_unlock(&event_mutex);
Steven Rostedta8259072009-02-26 22:19:12 -05008756
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04008757 if (ret < 0)
8758 return ret;
8759
Steven Rostedta8259072009-02-26 22:19:12 -05008760 *ppos += cnt;
8761
8762 return cnt;
8763}
8764
Steven Rostedta8259072009-02-26 22:19:12 -05008765static const struct file_operations trace_options_core_fops = {
8766 .open = tracing_open_generic,
8767 .read = trace_options_core_read,
8768 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02008769 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05008770};
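/*
 * Illustrative usage (not part of the original file): each core trace flag
 * gets a file under "options/" backed by the fops above, and such a file
 * only accepts "0" or "1", for example:
 *
 *   echo 1 > /sys/kernel/tracing/options/sym-offset
 *   echo 0 > /sys/kernel/tracing/options/overwrite
 */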
8771
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008772struct dentry *trace_create_file(const char *name,
Al Virof4ae40a62011-07-24 04:33:43 -04008773 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008774 struct dentry *parent,
8775 void *data,
8776 const struct file_operations *fops)
8777{
8778 struct dentry *ret;
8779
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008780 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008781 if (!ret)
Joe Perchesa395d6a2016-03-22 14:28:09 -07008782 pr_warn("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01008783
8784 return ret;
8785}
8786
8787
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008788static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05008789{
8790 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05008791
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008792 if (tr->options)
8793 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05008794
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05008795 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05008796 if (IS_ERR(d_tracer))
Steven Rostedta8259072009-02-26 22:19:12 -05008797 return NULL;
8798
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05008799 tr->options = tracefs_create_dir("options", d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008800 if (!tr->options) {
Joe Perchesa395d6a2016-03-22 14:28:09 -07008801 pr_warn("Could not create tracefs directory 'options'\n");
Steven Rostedta8259072009-02-26 22:19:12 -05008802 return NULL;
8803 }
8804
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008805 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05008806}
8807
Steven Rostedt577b7852009-02-26 23:43:05 -05008808static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008809create_trace_option_file(struct trace_array *tr,
8810 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05008811 struct tracer_flags *flags,
8812 struct tracer_opt *opt)
8813{
8814 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05008815
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008816 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05008817 if (!t_options)
8818 return;
8819
8820 topt->flags = flags;
8821 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008822 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05008823
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04008824 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
8825 t_options, topt, &trace_options_fops);
Steven Rostedt577b7852009-02-26 23:43:05 -05008826
Steven Rostedt577b7852009-02-26 23:43:05 -05008827}
8828
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008829static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008830create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05008831{
8832 struct trace_option_dentry *topts;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008833 struct trace_options *tr_topts;
Steven Rostedt577b7852009-02-26 23:43:05 -05008834 struct tracer_flags *flags;
8835 struct tracer_opt *opts;
8836 int cnt;
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008837 int i;
Steven Rostedt577b7852009-02-26 23:43:05 -05008838
8839 if (!tracer)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008840 return;
Steven Rostedt577b7852009-02-26 23:43:05 -05008841
8842 flags = tracer->flags;
8843
8844 if (!flags || !flags->opts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008845 return;
8846
8847 /*
8848 * If this is an instance, only create flags for tracers
8849 * the instance may have.
8850 */
8851 if (!trace_ok_for_array(tracer, tr))
8852 return;
8853
8854 for (i = 0; i < tr->nr_topts; i++) {
Chunyu Hud39cdd22016-03-08 21:37:01 +08008855 /* Make sure there are no duplicate flags. */
8856 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008857 return;
8858 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008859
8860 opts = flags->opts;
8861
8862 for (cnt = 0; opts[cnt].name; cnt++)
8863 ;
8864
Steven Rostedt0cfe8242009-02-27 10:51:10 -05008865 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05008866 if (!topts)
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04008867 return;
8868
8869 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8870 GFP_KERNEL);
8871 if (!tr_topts) {
8872 kfree(topts);
8873 return;
8874 }
8875
8876 tr->topts = tr_topts;
8877 tr->topts[tr->nr_topts].tracer = tracer;
8878 tr->topts[tr->nr_topts].topts = topts;
8879 tr->nr_topts++;
Steven Rostedt577b7852009-02-26 23:43:05 -05008880
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008881 for (cnt = 0; opts[cnt].name; cnt++) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008882 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05008883 &opts[cnt]);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05008884 MEM_FAIL(topts[cnt].entry == NULL,
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04008885 "Failed to create trace option: %s",
8886 opts[cnt].name);
8887 }
Steven Rostedt577b7852009-02-26 23:43:05 -05008888}
8889
Steven Rostedta8259072009-02-26 22:19:12 -05008890static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008891create_trace_option_core_file(struct trace_array *tr,
8892 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05008893{
8894 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05008895
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008896 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008897 if (!t_options)
8898 return NULL;
8899
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04008900 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04008901 (void *)&tr->trace_flags_index[index],
8902 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05008903}
8904
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008905static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05008906{
8907 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008908 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05008909 int i;
8910
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008911 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05008912 if (!t_options)
8913 return;
8914
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04008915 for (i = 0; trace_options[i]; i++) {
8916 if (top_level ||
8917 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8918 create_trace_option_core_file(tr, trace_options[i], i);
8919 }
Steven Rostedta8259072009-02-26 22:19:12 -05008920}
8921
Steven Rostedt499e5472012-02-22 15:50:28 -05008922static ssize_t
8923rb_simple_read(struct file *filp, char __user *ubuf,
8924 size_t cnt, loff_t *ppos)
8925{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04008926 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05008927 char buf[64];
8928 int r;
8929
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008930 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05008931 r = sprintf(buf, "%d\n", r);
8932
8933 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8934}
8935
8936static ssize_t
8937rb_simple_write(struct file *filp, const char __user *ubuf,
8938 size_t cnt, loff_t *ppos)
8939{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04008940 struct trace_array *tr = filp->private_data;
Steven Rostedt (VMware)13292492019-12-13 13:58:57 -05008941 struct trace_buffer *buffer = tr->array_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05008942 unsigned long val;
8943 int ret;
8944
8945 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8946 if (ret)
8947 return ret;
8948
8949 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008950 mutex_lock(&trace_types_lock);
Steven Rostedt (VMware)f1436412018-08-01 15:40:57 -04008951 if (!!val == tracer_tracing_is_on(tr)) {
8952 val = 0; /* do nothing */
8953 } else if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008954 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008955 if (tr->current_trace->start)
8956 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008957 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04008958 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04008959 if (tr->current_trace->stop)
8960 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05008961 }
8962 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05008963 }
8964
8965 (*ppos)++;
8966
8967 return cnt;
8968}
8969
8970static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04008971 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05008972 .read = rb_simple_read,
8973 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04008974 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05008975 .llseek = default_llseek,
8976};
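/*
 * Illustrative usage (not part of the original file): these fops back the
 * per-instance "tracing_on" file, so writes toggle recording into the ring
 * buffer without changing which tracer is selected:
 *
 *   echo 0 > /sys/kernel/tracing/tracing_on    # stop recording
 *   echo 1 > /sys/kernel/tracing/tracing_on    # resume recording
 *   cat /sys/kernel/tracing/tracing_on         # read back 0 or 1
 */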
8977
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05008978static ssize_t
8979buffer_percent_read(struct file *filp, char __user *ubuf,
8980 size_t cnt, loff_t *ppos)
8981{
8982 struct trace_array *tr = filp->private_data;
8983 char buf[64];
8984 int r;
8985
8986 r = tr->buffer_percent;
8987 r = sprintf(buf, "%d\n", r);
8988
8989 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8990}
8991
8992static ssize_t
8993buffer_percent_write(struct file *filp, const char __user *ubuf,
8994 size_t cnt, loff_t *ppos)
8995{
8996 struct trace_array *tr = filp->private_data;
8997 unsigned long val;
8998 int ret;
8999
9000 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9001 if (ret)
9002 return ret;
9003
9004 if (val > 100)
9005 return -EINVAL;
9006
9007 if (!val)
9008 val = 1;
9009
9010 tr->buffer_percent = val;
9011
9012 (*ppos)++;
9013
9014 return cnt;
9015}
9016
9017static const struct file_operations buffer_percent_fops = {
9018 .open = tracing_open_generic_tr,
9019 .read = buffer_percent_read,
9020 .write = buffer_percent_write,
9021 .release = tracing_release_generic_tr,
9022 .llseek = default_llseek,
9023};
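/*
 * Illustrative usage (not part of the original file): "buffer_percent"
 * accepts 0-100 and, as consumed elsewhere by the ring-buffer wait logic,
 * sets how full the buffer must be before blocked readers (e.g. on
 * trace_pipe_raw) are woken:
 *
 *   echo 50 > /sys/kernel/tracing/buffer_percent
 *
 * Values above 100 are rejected with -EINVAL, and a write of 0 is bumped
 * to 1 by the handler above.
 */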
9024
YueHaibingff585c52019-06-14 23:32:10 +08009025static struct dentry *trace_instance_dir;
Steven Rostedt277ba042012-08-03 16:10:49 -04009026
9027static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009028init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
Steven Rostedt277ba042012-08-03 16:10:49 -04009029
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05009030static int
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009031allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04009032{
9033 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009034
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009035 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009036
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05009037 buf->tr = tr;
9038
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05009039 buf->buffer = ring_buffer_alloc(size, rb_flags);
9040 if (!buf->buffer)
9041 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009042
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05009043 buf->data = alloc_percpu(struct trace_array_cpu);
9044 if (!buf->data) {
9045 ring_buffer_free(buf->buffer);
Steven Rostedt (VMware)4397f042017-12-26 20:07:34 -05009046 buf->buffer = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05009047 return -ENOMEM;
9048 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009049
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009050 /* Allocate the first page for all buffers */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009051 set_buffer_entries(&tr->array_buffer,
9052 ring_buffer_size(tr->array_buffer.buffer, 0));
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009053
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05009054 return 0;
9055}
9056
9057static int allocate_trace_buffers(struct trace_array *tr, int size)
9058{
9059 int ret;
9060
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009061 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05009062 if (ret)
9063 return ret;
9064
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009065#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05009066 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9067 allocate_snapshot ? size : 1);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05009068 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009069 ring_buffer_free(tr->array_buffer.buffer);
9070 tr->array_buffer.buffer = NULL;
9071 free_percpu(tr->array_buffer.data);
9072 tr->array_buffer.data = NULL;
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05009073 return -ENOMEM;
9074 }
9075 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009076
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05009077 /*
9078 * Only the top level trace array gets its snapshot allocated
9079 * from the kernel command line.
9080 */
9081 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009082#endif
Steven Rostedt (VMware)11f5efc2020-05-06 10:36:18 -04009083
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009084 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009085}
9086
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009087static void free_trace_buffer(struct array_buffer *buf)
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04009088{
9089 if (buf->buffer) {
9090 ring_buffer_free(buf->buffer);
9091 buf->buffer = NULL;
9092 free_percpu(buf->data);
9093 buf->data = NULL;
9094 }
9095}
9096
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04009097static void free_trace_buffers(struct trace_array *tr)
9098{
9099 if (!tr)
9100 return;
9101
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009102 free_trace_buffer(&tr->array_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04009103
9104#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04009105 free_trace_buffer(&tr->max_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04009106#endif
9107}
9108
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04009109static void init_trace_flags_index(struct trace_array *tr)
9110{
9111 int i;
9112
9113 /* Used by the trace options files */
9114 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9115 tr->trace_flags_index[i] = i;
9116}
9117
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04009118static void __update_tracer_options(struct trace_array *tr)
9119{
9120 struct tracer *t;
9121
9122 for (t = trace_types; t; t = t->next)
9123 add_tracer_options(tr, t);
9124}
9125
9126static void update_tracer_options(struct trace_array *tr)
9127{
9128 mutex_lock(&trace_types_lock);
9129 __update_tracer_options(tr);
9130 mutex_unlock(&trace_types_lock);
9131}
9132
Tom Zanussi89c95fc2020-01-29 12:59:21 -06009133/* Must have trace_types_lock held */
9134struct trace_array *trace_array_find(const char *instance)
9135{
9136 struct trace_array *tr, *found = NULL;
9137
9138 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9139 if (tr->name && strcmp(tr->name, instance) == 0) {
9140 found = tr;
9141 break;
9142 }
9143 }
9144
9145 return found;
9146}
9147
9148struct trace_array *trace_array_find_get(const char *instance)
9149{
9150 struct trace_array *tr;
9151
9152 mutex_lock(&trace_types_lock);
9153 tr = trace_array_find(instance);
9154 if (tr)
9155 tr->ref++;
9156 mutex_unlock(&trace_types_lock);
9157
9158 return tr;
9159}
9160
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09009161static int trace_array_create_dir(struct trace_array *tr)
9162{
9163 int ret;
9164
9165 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9166 if (!tr->dir)
9167 return -EINVAL;
9168
9169 ret = event_trace_add_tracer(tr->dir, tr);
Kamal Agrawalff41c282021-07-30 18:53:06 +05309170 if (ret) {
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09009171 tracefs_remove(tr->dir);
Kamal Agrawalff41c282021-07-30 18:53:06 +05309172 return ret;
9173 }
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09009174
9175 init_tracer_tracefs(tr, tr->dir);
9176 __update_tracer_options(tr);
9177
9178 return ret;
9179}
9180
Divya Indi28879782019-11-20 11:08:38 -08009181static struct trace_array *trace_array_create(const char *name)
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009182{
Steven Rostedt277ba042012-08-03 16:10:49 -04009183 struct trace_array *tr;
9184 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04009185
Steven Rostedt277ba042012-08-03 16:10:49 -04009186 ret = -ENOMEM;
9187 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9188 if (!tr)
Divya Indi28879782019-11-20 11:08:38 -08009189 return ERR_PTR(ret);
Steven Rostedt277ba042012-08-03 16:10:49 -04009190
9191 tr->name = kstrdup(name, GFP_KERNEL);
9192 if (!tr->name)
9193 goto out_free_tr;
9194
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009195 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9196 goto out_free_tr;
9197
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04009198 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009199
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009200 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9201
Steven Rostedt277ba042012-08-03 16:10:49 -04009202 raw_spin_lock_init(&tr->start_lock);
9203
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05009204 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9205
Steven Rostedt277ba042012-08-03 16:10:49 -04009206 tr->current_trace = &nop_trace;
9207
9208 INIT_LIST_HEAD(&tr->systems);
9209 INIT_LIST_HEAD(&tr->events);
Tom Zanussi067fe032018-01-15 20:51:56 -06009210 INIT_LIST_HEAD(&tr->hist_vars);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -04009211 INIT_LIST_HEAD(&tr->err_log);
Steven Rostedt277ba042012-08-03 16:10:49 -04009212
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05009213 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04009214 goto out_free_tr;
9215
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09009216 if (ftrace_allocate_ftrace_ops(tr) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04009217 goto out_free_tr;
9218
Steven Rostedt (VMware)04ec7bb2017-04-05 13:12:55 -04009219 ftrace_init_trace_array(tr);
9220
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04009221 init_trace_flags_index(tr);
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09009222
9223 if (trace_instance_dir) {
9224 ret = trace_array_create_dir(tr);
9225 if (ret)
9226 goto out_free_tr;
Masami Hiramatsu720dee52020-09-25 01:40:08 +09009227 } else
9228 __trace_early_add_events(tr);
Steven Rostedt277ba042012-08-03 16:10:49 -04009229
9230 list_add(&tr->list, &ftrace_trace_arrays);
9231
Divya Indi28879782019-11-20 11:08:38 -08009232 tr->ref++;
9233
Divya Indif45d1222019-03-20 11:28:51 -07009234 return tr;
Steven Rostedt277ba042012-08-03 16:10:49 -04009235
9236 out_free_tr:
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09009237 ftrace_free_ftrace_ops(tr);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04009238 free_trace_buffers(tr);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009239 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04009240 kfree(tr->name);
9241 kfree(tr);
9242
Divya Indif45d1222019-03-20 11:28:51 -07009243 return ERR_PTR(ret);
9244}
Steven Rostedt277ba042012-08-03 16:10:49 -04009245
Divya Indif45d1222019-03-20 11:28:51 -07009246static int instance_mkdir(const char *name)
9247{
Divya Indi28879782019-11-20 11:08:38 -08009248 struct trace_array *tr;
9249 int ret;
9250
9251 mutex_lock(&event_mutex);
9252 mutex_lock(&trace_types_lock);
9253
9254 ret = -EEXIST;
Tom Zanussi89c95fc2020-01-29 12:59:21 -06009255 if (trace_array_find(name))
9256 goto out_unlock;
Divya Indi28879782019-11-20 11:08:38 -08009257
9258 tr = trace_array_create(name);
9259
9260 ret = PTR_ERR_OR_ZERO(tr);
9261
9262out_unlock:
9263 mutex_unlock(&trace_types_lock);
9264 mutex_unlock(&event_mutex);
9265 return ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04009266}
9267
Divya Indi28879782019-11-20 11:08:38 -08009268/**
9269 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9270 * @name: The name of the trace array to be looked up/created.
9271 *
 9272 * Returns a pointer to the trace array with the given name, or
 9273 * NULL if it cannot be created.
9274 *
9275 * NOTE: This function increments the reference counter associated with the
9276 * trace array returned. This makes sure it cannot be freed while in use.
9277 * Use trace_array_put() once the trace array is no longer needed.
Steven Rostedt (VMware)28394da2020-01-24 20:47:46 -05009278 * If the trace_array is to be freed, trace_array_destroy() needs to
9279 * be called after the trace_array_put(), or simply let user space delete
9280 * it from the tracefs instances directory. But until the
 9281 * trace_array_put() is called, user space cannot delete it.
Divya Indi28879782019-11-20 11:08:38 -08009282 *
9283 */
9284struct trace_array *trace_array_get_by_name(const char *name)
9285{
9286 struct trace_array *tr;
9287
9288 mutex_lock(&event_mutex);
9289 mutex_lock(&trace_types_lock);
9290
9291 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9292 if (tr->name && strcmp(tr->name, name) == 0)
9293 goto out_unlock;
9294 }
9295
9296 tr = trace_array_create(name);
9297
9298 if (IS_ERR(tr))
9299 tr = NULL;
9300out_unlock:
9301 if (tr)
9302 tr->ref++;
9303
9304 mutex_unlock(&trace_types_lock);
9305 mutex_unlock(&event_mutex);
9306 return tr;
9307}
9308EXPORT_SYMBOL_GPL(trace_array_get_by_name);
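/*
 * Minimal sketch (not part of the original file) of how a module might use
 * this API; the instance name and function names are made up, and error
 * handling beyond the NULL check is omitted.  The put-then-destroy order in
 * the exit path follows the kernel-doc above.
 */
#if 0	/* example only */
static struct trace_array *sample_tr;

static int __init sample_module_init(void)
{
	/* Creates instances/sample-instance if it does not already exist */
	sample_tr = trace_array_get_by_name("sample-instance");
	if (!sample_tr)
		return -ENOMEM;
	return 0;
}

static void __exit sample_module_exit(void)
{
	/* Drop our reference first, then the instance may be destroyed */
	trace_array_put(sample_tr);
	trace_array_destroy(sample_tr);
}

module_init(sample_module_init);
module_exit(sample_module_exit);
#endif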
9309
Divya Indif45d1222019-03-20 11:28:51 -07009310static int __remove_instance(struct trace_array *tr)
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009311{
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04009312 int i;
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009313
Divya Indi28879782019-11-20 11:08:38 -08009314 /* Reference counter for a newly created trace array = 1. */
Steven Rostedt (VMware)7ef282e2020-06-29 23:45:56 -04009315 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
Divya Indif45d1222019-03-20 11:28:51 -07009316 return -EBUSY;
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05009317
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009318 list_del(&tr->list);
9319
Steven Rostedt (Red Hat)20550622016-04-25 22:40:12 -04009320 /* Disable all the flags that were enabled coming in */
9321 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9322 if ((1 << i) & ZEROED_TRACE_FLAGS)
9323 set_tracer_flag(tr, 1 << i, 0);
9324 }
9325
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05009326 tracing_set_nop(tr);
Naveen N. Raoa0e63692017-05-16 23:21:26 +05309327 clear_ftrace_function_probes(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009328 event_trace_del_tracer(tr);
Namhyung Kimd879d0b2017-04-17 11:44:27 +09009329 ftrace_clear_pids(tr);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05009330 ftrace_destroy_function_files(tr);
Al Viroa3d1e7e2019-11-18 09:43:10 -05009331 tracefs_remove(tr->dir);
Yordan Karadzhov (VMware)20344c52021-04-15 21:18:51 +03009332 free_percpu(tr->last_func_repeats);
Steven Rostedt (Red Hat)a9fcaaa2014-06-06 23:17:28 -04009333 free_trace_buffers(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009334
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04009335 for (i = 0; i < tr->nr_topts; i++) {
9336 kfree(tr->topts[i].topts);
9337 }
9338 kfree(tr->topts);
9339
Chunyu Hudb9108e02017-07-20 18:36:09 +08009340 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009341 kfree(tr->name);
9342 kfree(tr);
9343
Divya Indif45d1222019-03-20 11:28:51 -07009344 return 0;
9345}
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009346
Divya Indie585e642019-08-14 10:55:24 -07009347int trace_array_destroy(struct trace_array *this_tr)
Divya Indif45d1222019-03-20 11:28:51 -07009348{
Divya Indie585e642019-08-14 10:55:24 -07009349 struct trace_array *tr;
Divya Indif45d1222019-03-20 11:28:51 -07009350 int ret;
9351
Divya Indie585e642019-08-14 10:55:24 -07009352 if (!this_tr)
Divya Indif45d1222019-03-20 11:28:51 -07009353 return -EINVAL;
9354
9355 mutex_lock(&event_mutex);
9356 mutex_lock(&trace_types_lock);
9357
Divya Indie585e642019-08-14 10:55:24 -07009358 ret = -ENODEV;
9359
9360 /* Making sure trace array exists before destroying it. */
9361 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9362 if (tr == this_tr) {
9363 ret = __remove_instance(tr);
9364 break;
9365 }
9366 }
Divya Indif45d1222019-03-20 11:28:51 -07009367
9368 mutex_unlock(&trace_types_lock);
9369 mutex_unlock(&event_mutex);
9370
9371 return ret;
9372}
9373EXPORT_SYMBOL_GPL(trace_array_destroy);
9374
9375static int instance_rmdir(const char *name)
9376{
9377 struct trace_array *tr;
9378 int ret;
9379
9380 mutex_lock(&event_mutex);
9381 mutex_lock(&trace_types_lock);
9382
9383 ret = -ENODEV;
Tom Zanussi89c95fc2020-01-29 12:59:21 -06009384 tr = trace_array_find(name);
9385 if (tr)
9386 ret = __remove_instance(tr);
Divya Indif45d1222019-03-20 11:28:51 -07009387
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009388 mutex_unlock(&trace_types_lock);
Steven Rostedt (VMware)12ecef02017-09-21 16:22:49 -04009389 mutex_unlock(&event_mutex);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04009390
9391 return ret;
9392}
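/*
 * Illustrative usage (not part of the original file): instance_mkdir() and
 * instance_rmdir() above are what run when user space manipulates the
 * "instances" directory:
 *
 *   mkdir /sys/kernel/tracing/instances/foo   # creates a new trace array
 *   rmdir /sys/kernel/tracing/instances/foo   # fails with -EBUSY while in use
 */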
9393
Steven Rostedt277ba042012-08-03 16:10:49 -04009394static __init void create_trace_instances(struct dentry *d_tracer)
9395{
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09009396 struct trace_array *tr;
9397
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05009398 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9399 instance_mkdir,
9400 instance_rmdir);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05009401 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
Steven Rostedt277ba042012-08-03 16:10:49 -04009402 return;
Masami Hiramatsu4114fbf2020-09-10 21:39:07 +09009403
9404 mutex_lock(&event_mutex);
9405 mutex_lock(&trace_types_lock);
9406
9407 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9408 if (!tr->name)
9409 continue;
9410 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9411 "Failed to create instance directory\n"))
9412 break;
9413 }
9414
9415 mutex_unlock(&trace_types_lock);
9416 mutex_unlock(&event_mutex);
Steven Rostedt277ba042012-08-03 16:10:49 -04009417}
9418
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009419static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009420init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009421{
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04009422 struct trace_event_file *file;
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05009423 int cpu;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009424
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009425 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05009426 tr, &show_traces_fops);
9427
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009428 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05009429 tr, &set_tracer_fops);
9430
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009431 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07009432 tr, &tracing_cpumask_fops);
9433
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009434 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009435 tr, &tracing_iter_fops);
9436
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009437 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02009438 tr, &tracing_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009439
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009440 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
Oleg Nesterov15544202013-07-23 17:25:57 +02009441 tr, &tracing_pipe_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009442
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009443 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02009444 tr, &tracing_entries_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009445
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009446 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009447 tr, &tracing_total_entries_fops);
9448
Wang YanQing238ae932013-05-26 16:52:01 +08009449 trace_create_file("free_buffer", 0200, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009450 tr, &tracing_free_buffer_fops);
9451
9452 trace_create_file("trace_marker", 0220, d_tracer,
9453 tr, &tracing_mark_fops);
9454
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04009455 file = __find_event_file(tr, "ftrace", "print");
9456 if (file && file->dir)
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009457 trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
9458 file, &event_trigger_fops);
Steven Rostedt (VMware)3dd80952018-05-09 14:17:48 -04009459 tr->trace_marker_file = file;
9460
Steven Rostedtfa32e852016-07-06 15:25:08 -04009461 trace_create_file("trace_marker_raw", 0220, d_tracer,
9462 tr, &tracing_mark_raw_fops);
9463
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009464 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009465 &trace_clock_fops);
9466
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009467 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02009468 tr, &rb_simple_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05009469
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009470 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
Tom Zanussi2c1ea602018-01-15 20:51:41 -06009471 &trace_time_stamp_mode_fops);
9472
Steven Rostedt (VMware)a7b1d742018-11-29 22:36:47 -05009473 tr->buffer_percent = 50;
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05009474
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009475 trace_create_file("buffer_percent", TRACE_MODE_READ, d_tracer,
Steven Rostedt (VMware)03329f92018-11-29 21:38:42 -05009476 tr, &buffer_percent_fops);
9477
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04009478 create_trace_options_dir(tr);
9479
Viktor Rosendahl (BMW)91edde22019-10-09 00:08:21 +02009480 trace_create_maxlat_file(tr, d_tracer);
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05009481
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05009482 if (ftrace_create_function_files(tr, d_tracer))
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -05009483 MEM_FAIL(1, "Could not allocate function filter files");
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05009484
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05009485#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009486 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02009487 tr, &snapshot_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05009488#endif
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05009489
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009490 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
Tom Zanussi8a062902019-03-31 18:48:15 -05009491 tr, &tracing_err_log_fops);
9492
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05009493 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009494 tracing_init_tracefs_percpu(tr, cpu);
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05009495
Steven Rostedt (Red Hat)345ddcc2016-04-22 18:11:33 -04009496 ftrace_init_tracefs(tr, d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009497}
9498
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13009499static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009500{
9501 struct vfsmount *mnt;
9502 struct file_system_type *type;
9503
9504 /*
9505 * To maintain backward compatibility for tools that mount
9506 * debugfs to get to the tracing facility, tracefs is automatically
9507 * mounted to the debugfs/tracing directory.
9508 */
9509 type = get_fs_type("tracefs");
9510 if (!type)
9511 return NULL;
Eric W. Biederman93faccbb2017-02-01 06:06:16 +13009512 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009513 put_filesystem(type);
9514 if (IS_ERR(mnt))
9515 return NULL;
9516 mntget(mnt);
9517
9518 return mnt;
9519}
9520
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009521/**
9522 * tracing_init_dentry - initialize top level trace array
9523 *
9524 * This is called when creating files or directories in the tracing
9525 * directory. It is called via fs_initcall() by any of the boot up code
9526 * and expects to return the dentry of the top level tracing directory.
9527 */
Wei Yang22c36b12020-07-12 09:10:36 +08009528int tracing_init_dentry(void)
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009529{
9530 struct trace_array *tr = &global_trace;
9531
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009532 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11009533 pr_warn("Tracing disabled due to lockdown\n");
Wei Yang22c36b12020-07-12 09:10:36 +08009534 return -EPERM;
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009535 }
9536
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009537 /* The top level trace array uses NULL as parent */
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009538 if (tr->dir)
Wei Yang22c36b12020-07-12 09:10:36 +08009539 return 0;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009540
Peter Enderborg072e1332020-07-16 09:15:10 +02009541 if (WARN_ON(!tracefs_initialized()))
Wei Yang22c36b12020-07-12 09:10:36 +08009542 return -ENODEV;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009543
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009544 /*
9545 * As there may still be users that expect the tracing
9546 * files to exist in debugfs/tracing, we must automount
9547 * the tracefs file system there, so older tools still
Ingo Molnarf2cc0202021-03-23 18:49:35 +01009548 * work with the newer kernel.
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05009549 */
9550 tr->dir = debugfs_create_automount("tracing", NULL,
9551 trace_automount, NULL);
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009552
Wei Yang22c36b12020-07-12 09:10:36 +08009553 return 0;
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05009554}
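/*
 * Hypothetical usage sketch (not in the original file): code that creates
 * files in the top level tracing directory typically calls
 * tracing_init_dentry() first and silently backs off when it fails, the
 * same way tracer_init_tracefs() does below.  The initcall name and the
 * reuse of tracing_readme_fops here are illustrative only.
 */
#if 0
static __init int example_tracing_file_init(void)
{
	int ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;	/* lockdown or no tracefs: nothing to do */

	/* A NULL parent places the file in the top level tracing directory */
	trace_create_file("example_readme", TRACE_MODE_READ, NULL,
			  NULL, &tracing_readme_fops);
	return 0;
}
fs_initcall(example_tracing_file_init);
#endif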
9555
Jeremy Linton00f4b652017-05-31 16:56:43 -05009556extern struct trace_eval_map *__start_ftrace_eval_maps[];
9557extern struct trace_eval_map *__stop_ftrace_eval_maps[];
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009558
Steven Rostedt (VMware)f6a69462020-12-14 21:03:27 -05009559static struct workqueue_struct *eval_map_wq __initdata;
9560static struct work_struct eval_map_work __initdata;
9561
9562static void __init eval_map_work_func(struct work_struct *work)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009563{
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009564 int len;
9565
Jeremy Linton02fd7f62017-05-31 16:56:42 -05009566 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009567 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009568}
9569
Steven Rostedt (VMware)f6a69462020-12-14 21:03:27 -05009570static int __init trace_eval_init(void)
9571{
9572 INIT_WORK(&eval_map_work, eval_map_work_func);
9573
9574 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9575 if (!eval_map_wq) {
9576 pr_err("Unable to allocate eval_map_wq\n");
 9577 /* Workqueue allocation failed: fall back and do the work synchronously */
9578 eval_map_work_func(&eval_map_work);
9579 return -ENOMEM;
9580 }
9581
9582 queue_work(eval_map_wq, &eval_map_work);
9583 return 0;
9584}
9585
9586static int __init trace_eval_sync(void)
9587{
9588 /* Make sure the eval map updates are finished */
9589 if (eval_map_wq)
9590 destroy_workqueue(eval_map_wq);
9591 return 0;
9592}
9593
9594late_initcall_sync(trace_eval_sync);
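/*
 * Background sketch (illustrative, hypothetical event header): the
 * __start_ftrace_eval_maps/__stop_ftrace_eval_maps entries walked by
 * eval_map_work_func() above are emitted by TRACE_DEFINE_ENUM() in trace
 * event headers, so that enum names used with __print_symbolic() in the
 * event format files resolve to their numeric values.  The enum names
 * below are made up.
 */
#if 0
/* in a hypothetical include/trace/events/example.h, before TRACE_EVENT() */
TRACE_DEFINE_ENUM(EXAMPLE_STATE_RUNNING);
TRACE_DEFINE_ENUM(EXAMPLE_STATE_BLOCKED);
#endif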
9595
9596
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009597#ifdef CONFIG_MODULES
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009598static void trace_module_add_evals(struct module *mod)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009599{
Jeremy Linton99be6472017-05-31 16:56:44 -05009600 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009601 return;
9602
9603 /*
9604 * Modules with bad taint do not have events created, do
 9605 * not bother with their eval maps either.
9606 */
9607 if (trace_module_has_bad_taint(mod))
9608 return;
9609
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009610 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009611}
9612
Jeremy Linton681bec02017-05-31 16:56:53 -05009613#ifdef CONFIG_TRACE_EVAL_MAP_FILE
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009614static void trace_module_remove_evals(struct module *mod)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009615{
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05009616 union trace_eval_map_item *map;
9617 union trace_eval_map_item **last = &trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009618
Jeremy Linton99be6472017-05-31 16:56:44 -05009619 if (!mod->num_trace_evals)
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009620 return;
9621
Jeremy Linton1793ed92017-05-31 16:56:46 -05009622 mutex_lock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009623
Jeremy Linton23bf8cb2017-05-31 16:56:45 -05009624 map = trace_eval_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009625
9626 while (map) {
9627 if (map->head.mod == mod)
9628 break;
Jeremy Linton5f60b352017-05-31 16:56:47 -05009629 map = trace_eval_jmp_to_tail(map);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009630 last = &map->tail.next;
9631 map = map->tail.next;
9632 }
9633 if (!map)
9634 goto out;
9635
Jeremy Linton5f60b352017-05-31 16:56:47 -05009636 *last = trace_eval_jmp_to_tail(map)->tail.next;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009637 kfree(map);
9638 out:
Jeremy Linton1793ed92017-05-31 16:56:46 -05009639 mutex_unlock(&trace_eval_mutex);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009640}
9641#else
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009642static inline void trace_module_remove_evals(struct module *mod) { }
Jeremy Linton681bec02017-05-31 16:56:53 -05009643#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009644
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009645static int trace_module_notify(struct notifier_block *self,
9646 unsigned long val, void *data)
9647{
9648 struct module *mod = data;
9649
9650 switch (val) {
9651 case MODULE_STATE_COMING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009652 trace_module_add_evals(mod);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009653 break;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009654 case MODULE_STATE_GOING:
Jeremy Lintonf57a4142017-05-31 16:56:48 -05009655 trace_module_remove_evals(mod);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009656 break;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009657 }
9658
Peter Zijlstra0340a6b2020-08-18 15:57:37 +02009659 return NOTIFY_OK;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009660}
9661
9662static struct notifier_block trace_module_nb = {
9663 .notifier_call = trace_module_notify,
9664 .priority = 0,
9665};
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009666#endif /* CONFIG_MODULES */
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009667
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05009668static __init int tracer_init_tracefs(void)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009669{
Wei Yang22c36b12020-07-12 09:10:36 +08009670 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009671
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08009672 trace_access_lock_init();
9673
Wei Yang22c36b12020-07-12 09:10:36 +08009674 ret = tracing_init_dentry();
9675 if (ret)
Namhyung Kimed6f1c92013-04-10 09:18:12 +09009676 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009677
Steven Rostedt (VMware)58b92542018-05-08 15:09:27 -04009678 event_trace_init();
9679
Wei Yang22c36b12020-07-12 09:10:36 +08009680 init_tracer_tracefs(&global_trace, NULL);
9681 ftrace_init_tracefs_toplevel(&global_trace, NULL);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009682
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009683 trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04009684 &global_trace, &tracing_thresh_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009685
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009686 trace_create_file("README", TRACE_MODE_READ, NULL,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01009687 NULL, &tracing_readme_fops);
Ingo Molnar7bd2f242008-05-12 21:20:45 +02009688
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009689 trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
Avadh Patel69abe6a2009-04-10 16:04:48 -04009690 NULL, &tracing_saved_cmdlines_fops);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03009691
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009692 trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09009693 NULL, &tracing_saved_cmdlines_size_fops);
9694
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009695 trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
Michael Sartain99c621d2017-07-05 22:07:15 -06009696 NULL, &tracing_saved_tgids_fops);
9697
Jeremy Linton5f60b352017-05-31 16:56:47 -05009698 trace_eval_init();
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04009699
Wei Yang22c36b12020-07-12 09:10:36 +08009700 trace_create_eval_file(NULL);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04009701
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04009702#ifdef CONFIG_MODULES
9703 register_module_notifier(&trace_module_nb);
9704#endif
9705
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009706#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (VMware)21ccc9c2021-08-18 11:24:51 -04009707 trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
Steven Rostedt (VMware)da537f02019-10-01 14:38:07 -04009708 NULL, &tracing_dyn_info_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009709#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01009710
Wei Yang22c36b12020-07-12 09:10:36 +08009711 create_trace_instances(NULL);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09009712
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04009713 update_tracer_options(&global_trace);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05009714
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01009715 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009716}
9717
Steven Rostedt (VMware)f3860132021-06-17 10:51:02 -04009718fs_initcall(tracer_init_tracefs);
9719
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009720static int trace_panic_handler(struct notifier_block *this,
9721 unsigned long event, void *unused)
9722{
Steven Rostedt944ac422008-10-23 19:26:08 -04009723 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009724 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009725 return NOTIFY_OK;
9726}
9727
9728static struct notifier_block trace_panic_notifier = {
9729 .notifier_call = trace_panic_handler,
9730 .next = NULL,
9731 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9732};
9733
9734static int trace_die_handler(struct notifier_block *self,
9735 unsigned long val,
9736 void *data)
9737{
9738 switch (val) {
9739 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04009740 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009741 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009742 break;
9743 default:
9744 break;
9745 }
9746 return NOTIFY_OK;
9747}
9748
9749static struct notifier_block trace_die_notifier = {
9750 .notifier_call = trace_die_handler,
9751 .priority = 200
9752};
9753
9754/*
 9755 * printk is capped at 1024 characters; we really don't need it that big.
9756 * Nothing should be printing 1000 characters anyway.
9757 */
9758#define TRACE_MAX_PRINT 1000
9759
9760/*
9761 * Define here KERN_TRACE so that we have one place to modify
9762 * it if we decide to change what log level the ftrace dump
9763 * should be at.
9764 */
Steven Rostedt428aee12009-01-14 12:24:42 -05009765#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009766
Jason Wessel955b61e2010-08-05 09:22:23 -05009767void
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009768trace_printk_seq(struct trace_seq *s)
9769{
9770 /* Probably should print a warning here. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04009771 if (s->seq.len >= TRACE_MAX_PRINT)
9772 s->seq.len = TRACE_MAX_PRINT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009773
Steven Rostedt (Red Hat)820b75f2014-11-19 10:56:41 -05009774 /*
9775 * More paranoid code. Although the buffer size is set to
9776 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9777 * an extra layer of protection.
9778 */
9779 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9780 s->seq.len = s->seq.size - 1;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009781
 9782 /* should be zero terminated, but we are paranoid. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04009783 s->buffer[s->seq.len] = 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009784
9785 printk(KERN_TRACE "%s", s->buffer);
9786
Steven Rostedtf9520752009-03-02 14:04:40 -05009787 trace_seq_init(s);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009788}
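/*
 * Illustrative sketch (hypothetical caller): trace_printk_seq() consumes a
 * struct trace_seq that was filled with the trace_seq_*() helpers, prints
 * it at KERN_TRACE level and reinitializes it, which is how ftrace_dump()
 * below emits iter.seq one entry at a time.
 */
#if 0
static void example_dump_line(void)
{
	static struct trace_seq s;	/* static: the seq buffer is page sized */

	trace_seq_init(&s);
	trace_seq_printf(&s, "example: %d events dropped\n", 42);
	trace_printk_seq(&s);		/* prints the buffer and re-inits the seq */
}
#endif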
9789
Jason Wessel955b61e2010-08-05 09:22:23 -05009790void trace_init_global_iter(struct trace_iterator *iter)
9791{
9792 iter->tr = &global_trace;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04009793 iter->trace = iter->tr->current_trace;
Steven Rostedtae3b5092013-01-23 15:22:59 -05009794 iter->cpu_file = RING_BUFFER_ALL_CPUS;
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009795 iter->array_buffer = &global_trace.array_buffer;
Cody P Schaferb2f974d2013-10-23 11:49:57 -07009796
9797 if (iter->trace && iter->trace->open)
9798 iter->trace->open(iter);
9799
9800 /* Annotate start of buffers if we had overruns */
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009801 if (ring_buffer_overruns(iter->array_buffer->buffer))
Cody P Schaferb2f974d2013-10-23 11:49:57 -07009802 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9803
9804 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9805 if (trace_clocks[iter->tr->clock_id].in_ns)
9806 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
Jason Wessel955b61e2010-08-05 09:22:23 -05009807}
9808
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009809void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009810{
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009811 /* use static because iter can be a bit big for the stack */
9812 static struct trace_iterator iter;
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009813 static atomic_t dump_running;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009814 struct trace_array *tr = &global_trace;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009815 unsigned int old_userobj;
Steven Rostedtd7690412008-10-01 00:29:53 -04009816 unsigned long flags;
9817 int cnt = 0, cpu;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009818
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009819 /* Only allow one dump user at a time. */
9820 if (atomic_inc_return(&dump_running) != 1) {
9821 atomic_dec(&dump_running);
9822 return;
Steven Rostedte0a413f2011-09-29 21:26:16 -04009823 }
9824
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009825 /*
9826 * Always turn off tracing when we dump.
9827 * We don't need to show trace output of what happens
9828 * between multiple crashes.
9829 *
9830 * If the user does a sysrq-z, then they can re-enable
9831 * tracing with echo 1 > tracing_on.
9832 */
9833 tracing_off();
9834
9835 local_irq_save(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009836
Jovi Zhang38dbe0b2013-01-25 18:03:07 +08009837 /* Simulate the iterator */
Jason Wessel955b61e2010-08-05 09:22:23 -05009838 trace_init_global_iter(&iter);
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09009839 /* Cannot use kmalloc for iter.temp and iter.fmt: we may be in an atomic or panic context */
Steven Rostedt (VMware)8e99cf92020-04-01 22:44:46 -04009840 iter.temp = static_temp_buf;
9841 iter.temp_size = STATIC_TEMP_BUF_SIZE;
Masami Hiramatsuefbbdaa2020-10-15 23:55:07 +09009842 iter.fmt = static_fmt_buf;
9843 iter.fmt_size = STATIC_FMT_BUF_SIZE;
Jason Wessel955b61e2010-08-05 09:22:23 -05009844
Steven Rostedtd7690412008-10-01 00:29:53 -04009845 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009846 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -04009847 }
9848
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009849 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009850
Török Edwinb54d3de2008-11-22 13:28:48 +02009851 /* don't look at user memory in panic mode */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009852 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
Török Edwinb54d3de2008-11-22 13:28:48 +02009853
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009854 switch (oops_dump_mode) {
9855 case DUMP_ALL:
Steven Rostedtae3b5092013-01-23 15:22:59 -05009856 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009857 break;
9858 case DUMP_ORIG:
9859 iter.cpu_file = raw_smp_processor_id();
9860 break;
9861 case DUMP_NONE:
9862 goto out_enable;
9863 default:
9864 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
Steven Rostedtae3b5092013-01-23 15:22:59 -05009865 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009866 }
9867
9868 printk(KERN_TRACE "Dumping ftrace buffer:\n");
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009869
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009870 /* Did function tracer already get disabled? */
9871 if (ftrace_is_dead()) {
9872 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9873 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9874 }
9875
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009876 /*
Randy Dunlap5c8c2062020-08-06 20:32:59 -07009877	 * We need to stop all tracing on all CPUs to read
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009878	 * the next buffer. This is a bit expensive, but is
9879	 * not done often. We read everything we can,
9880	 * and then release the locks again.
9881 */
9882
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009883 while (!trace_empty(&iter)) {
9884
9885 if (!cnt)
9886 printk(KERN_TRACE "---------------------------------\n");
9887
9888 cnt++;
9889
Miguel Ojeda0c97bf82019-05-23 14:45:35 +02009890 trace_iterator_reset(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009891 iter.iter_flags |= TRACE_FILE_LAT_FMT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009892
Jason Wessel955b61e2010-08-05 09:22:23 -05009893 if (trace_find_next_entry_inc(&iter) != NULL) {
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08009894 int ret;
9895
9896 ret = print_trace_line(&iter);
9897 if (ret != TRACE_TYPE_NO_CONSUME)
9898 trace_consume(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009899 }
Steven Rostedtb892e5c2012-03-01 22:06:48 -05009900 touch_nmi_watchdog();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009901
9902 trace_printk_seq(&iter.seq);
9903 }
9904
9905 if (!cnt)
9906 printk(KERN_TRACE " (ftrace buffer empty)\n");
9907 else
9908 printk(KERN_TRACE "---------------------------------\n");
9909
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02009910 out_enable:
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04009911 tr->trace_flags |= old_userobj;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009912
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04009913 for_each_tracing_cpu(cpu) {
Steven Rostedt (VMware)1c5eb442020-01-09 18:53:48 -05009914 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009915 }
Petr Mladek03fc7f92018-06-27 16:20:28 +02009916 atomic_dec(&dump_running);
Steven Rostedtcd891ae2009-04-28 11:39:34 -04009917 local_irq_restore(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04009918}
Paul E. McKenneya8eecf22011-10-02 11:01:15 -07009919EXPORT_SYMBOL_GPL(ftrace_dump);
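/*
 * Hypothetical usage sketch: because ftrace_dump() is exported GPL, a
 * module can dump the ring buffer when it hits a fatal condition.  The
 * driver function below is made up; enum ftrace_dump_mode and its
 * DUMP_ALL/DUMP_ORIG values are the ones handled in the switch above.
 */
#if 0
static void example_fatal_error(void)
{
	pr_emerg("example: fatal state, dumping trace buffer\n");
	/* DUMP_ORIG would dump only the CPU that hit the error */
	ftrace_dump(DUMP_ALL);
}
#endif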
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01009920
Tom Zanussi7e465ba2017-09-22 14:58:20 -05009921#define WRITE_BUFSIZE 4096
9922
9923ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9924 size_t count, loff_t *ppos,
Masami Hiramatsud2622712021-02-01 13:48:11 -06009925 int (*createfn)(const char *))
Tom Zanussi7e465ba2017-09-22 14:58:20 -05009926{
9927 char *kbuf, *buf, *tmp;
9928 int ret = 0;
9929 size_t done = 0;
9930 size_t size;
9931
9932 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9933 if (!kbuf)
9934 return -ENOMEM;
9935
9936 while (done < count) {
9937 size = count - done;
9938
9939 if (size >= WRITE_BUFSIZE)
9940 size = WRITE_BUFSIZE - 1;
9941
9942 if (copy_from_user(kbuf, buffer + done, size)) {
9943 ret = -EFAULT;
9944 goto out;
9945 }
9946 kbuf[size] = '\0';
9947 buf = kbuf;
9948 do {
9949 tmp = strchr(buf, '\n');
9950 if (tmp) {
9951 *tmp = '\0';
9952 size = tmp - buf + 1;
9953 } else {
9954 size = strlen(buf);
9955 if (done + size < count) {
9956 if (buf != kbuf)
9957 break;
9958 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9959 pr_warn("Line length is too long: Should be less than %d\n",
9960 WRITE_BUFSIZE - 2);
9961 ret = -EINVAL;
9962 goto out;
9963 }
9964 }
9965 done += size;
9966
9967 /* Remove comments */
9968 tmp = strchr(buf, '#');
9969
9970 if (tmp)
9971 *tmp = '\0';
9972
Masami Hiramatsud2622712021-02-01 13:48:11 -06009973 ret = createfn(buf);
Tom Zanussi7e465ba2017-09-22 14:58:20 -05009974 if (ret)
9975 goto out;
9976 buf += size;
9977
9978 } while (done < count);
9979 }
9980 ret = done;
9981
9982out:
9983 kfree(kbuf);
9984
9985 return ret;
9986}
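/*
 * Hypothetical usage sketch: trace_parse_run_command() splits a user write
 * into newline separated, '#'-comment stripped commands and hands each one
 * to the createfn callback; the kprobe/uprobe/dynamic event "one command
 * per line" write interfaces are built on top of it.  The parser and fops
 * below are made up.
 */
#if 0
static int example_create_cmd(const char *raw_command)
{
	/* parse one command, e.g. "p:myprobe do_sys_open"; return 0 on success */
	pr_info("example cmd: %s\n", raw_command);
	return 0;
}

static ssize_t example_cmds_write(struct file *file, const char __user *buffer,
				  size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       example_create_cmd);
}

static const struct file_operations example_cmds_fops = {
	.open	= simple_open,
	.write	= example_cmds_write,
	.llseek	= no_llseek,
};
#endif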
9987
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02009988__init static int tracer_alloc_buffers(void)
9989{
Steven Rostedt73c51622009-03-11 13:42:01 -04009990 int ring_buf_size;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10309991 int ret = -ENOMEM;
9992
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009993
9994 if (security_locked_down(LOCKDOWN_TRACEFS)) {
Stephen Rothwellee195452019-12-06 09:25:03 +11009995 pr_warn("Tracing disabled due to lockdown\n");
Steven Rostedt (VMware)a3566462019-12-02 16:25:27 -05009996 return -EPERM;
9997 }
9998
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04009999 /*
Qiujun Huang499f7bb2020-10-10 22:09:24 +080010000 * Make sure we don't accidentally add more trace options
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -040010001 * than we have bits for.
10002 */
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -040010003 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -040010004
Rusty Russell9e01c1b2009-01-01 10:12:22 +103010005 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
10006 goto out;
10007
Alexander Z Lamccfe9e42013-08-08 09:47:45 -070010008 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
Rusty Russell9e01c1b2009-01-01 10:12:22 +103010009 goto out_free_buffer_mask;
10010
Steven Rostedt07d777f2011-09-22 14:01:55 -040010011 /* Only allocate trace_printk buffers if a trace_printk exists */
Nathan Chancellorbf2cbe02020-02-19 22:10:12 -070010012 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
Steven Rostedt81698832012-10-11 10:15:05 -040010013 /* Must be called before global_trace.buffer is allocated */
Steven Rostedt07d777f2011-09-22 14:01:55 -040010014 trace_printk_init_buffers();
10015
Steven Rostedt73c51622009-03-11 13:42:01 -040010016 /* To save memory, keep the ring buffer size to its minimum */
10017 if (ring_buffer_expanded)
10018 ring_buf_size = trace_buf_size;
10019 else
10020 ring_buf_size = 1;
10021
Rusty Russell9e01c1b2009-01-01 10:12:22 +103010022 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -070010023 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020010024
Steven Rostedt2b6080f2012-05-11 13:29:49 -040010025 raw_spin_lock_init(&global_trace.start_lock);
10026
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +010010027 /*
 10028 * The prepare callback allocates some memory for the ring buffer. We
Qiujun Huang499f7bb2020-10-10 22:09:24 +080010029 * don't free the buffer if the CPU goes down. If we were to free
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +010010030 * the buffer, then the user would lose any trace that was in the
10031 * buffer. The memory will be removed once the "instance" is removed.
10032 */
10033 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
10034 "trace/RB:preapre", trace_rb_cpu_prepare,
10035 NULL);
10036 if (ret < 0)
10037 goto out_free_cpumask;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -040010038 /* Used for event triggers */
Dan Carpenter147d88e02017-08-01 14:02:01 +030010039 ret = -ENOMEM;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -040010040 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
10041 if (!temp_buffer)
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +010010042 goto out_rm_hp_state;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -040010043
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +090010044 if (trace_create_savedcmd() < 0)
10045 goto out_free_temp_buffer;
10046
Steven Rostedtab464282008-05-12 21:21:00 +020010047 /* TODO: make the number of buffers hot pluggable with CPUs */
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -050010048 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -050010049 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +090010050 goto out_free_savedcmd;
Steven Rostedt3928a8a2008-09-29 23:02:41 -040010051 }
Steven Rostedta7603ff2012-08-06 16:24:11 -040010052
Steven Rostedt499e5472012-02-22 15:50:28 -050010053 if (global_trace.buffer_disabled)
10054 tracing_off();
Steven Rostedt3928a8a2008-09-29 23:02:41 -040010055
Steven Rostedte1e232c2014-02-10 23:38:46 -050010056 if (trace_boot_clock) {
10057 ret = tracing_set_clock(&global_trace, trace_boot_clock);
10058 if (ret < 0)
Joe Perchesa395d6a2016-03-22 14:28:09 -070010059 pr_warn("Trace clock %s not defined, going back to default\n",
10060 trace_boot_clock);
Steven Rostedte1e232c2014-02-10 23:38:46 -050010061 }
10062
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -040010063 /*
10064 * register_tracer() might reference current_trace, so it
10065 * needs to be set before we register anything. This is
10066 * just a bootstrap of current_trace anyway.
10067 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -040010068 global_trace.current_trace = &nop_trace;
10069
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -050010070 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
10071
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -050010072 ftrace_init_global_array_ops(&global_trace);
10073
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -040010074 init_trace_flags_index(&global_trace);
10075
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -040010076 register_tracer(&nop_trace);
10077
Steven Rostedt (VMware)dbeafd02017-03-03 13:48:42 -050010078 /* Function tracing may start here (via kernel command line) */
10079 init_function_trace();
10080
Steven Rostedt60a11772008-05-12 21:20:44 +020010081 /* All seems OK, enable tracing */
10082 tracing_disabled = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -040010083
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040010084 atomic_notifier_chain_register(&panic_notifier_list,
10085 &trace_panic_notifier);
10086
10087 register_die_notifier(&trace_die_notifier);
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +010010088
Steven Rostedtae63b31e2012-05-03 23:09:03 -040010089 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
10090
10091 INIT_LIST_HEAD(&global_trace.systems);
10092 INIT_LIST_HEAD(&global_trace.events);
Tom Zanussi067fe032018-01-15 20:51:56 -060010093 INIT_LIST_HEAD(&global_trace.hist_vars);
Steven Rostedt (VMware)2f754e72019-04-01 22:52:21 -040010094 INIT_LIST_HEAD(&global_trace.err_log);
Steven Rostedtae63b31e2012-05-03 23:09:03 -040010095 list_add(&global_trace.list, &ftrace_trace_arrays);
10096
Jiaxing Wanga4d1e682015-11-04 09:14:29 +080010097 apply_trace_boot_options();
Steven Rostedt7bcfaf52012-11-01 22:56:07 -040010098
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -040010099 register_snapshot_cmd();
10100
Steven Rostedt (VMware)9a6944f2021-02-25 22:00:57 -050010101 test_can_verify();
10102
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +010010103 return 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040010104
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +090010105out_free_savedcmd:
10106 free_saved_cmdlines_buffer(savedcmd);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -040010107out_free_temp_buffer:
10108 ring_buffer_free(temp_buffer);
Sebastian Andrzej Siewiorb32614c2016-11-27 00:13:34 +010010109out_rm_hp_state:
10110 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
Rusty Russell9e01c1b2009-01-01 10:12:22 +103010111out_free_cpumask:
Alexander Z Lamccfe9e42013-08-08 09:47:45 -070010112 free_cpumask_var(global_trace.tracing_cpumask);
Rusty Russell9e01c1b2009-01-01 10:12:22 +103010113out_free_buffer_mask:
10114 free_cpumask_var(tracing_buffer_mask);
10115out:
10116 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020010117}
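/*
 * Pattern note with a minimal, self-contained sketch (hypothetical
 * resources): the error path of tracer_alloc_buffers() above uses the
 * common kernel "goto unwind" idiom, where labels release resources in
 * the reverse order of their allocation, so an early failure only frees
 * what has actually been set up.
 */
#if 0
static int example_setup(void)
{
	int ret = -ENOMEM;
	void *first, *second;

	first = kmalloc(64, GFP_KERNEL);
	if (!first)
		goto out;

	second = kmalloc(64, GFP_KERNEL);
	if (!second)
		goto out_free_first;

	return 0;

out_free_first:
	kfree(first);
out:
	return ret;
}
#endif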
Steven Rostedtb2821ae2009-02-02 21:38:32 -050010118
Steven Rostedt (VMware)e725c732017-03-03 13:37:33 -050010119void __init early_trace_init(void)
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -050010120{
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -050010121 if (tracepoint_printk) {
10122 tracepoint_print_iter =
Steven Rostedt (VMware)0e1e71d2021-04-19 14:23:12 -040010123 kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
Steven Rostedt (VMware)24589e32020-01-25 10:52:30 -050010124 if (MEM_FAIL(!tracepoint_print_iter,
10125 "Failed to allocate trace iterator\n"))
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -050010126 tracepoint_printk = 0;
Steven Rostedt (Red Hat)423917452016-11-23 15:52:45 -050010127 else
10128 static_key_enable(&tracepoint_printk_key.key);
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -050010129 }
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -050010130 tracer_alloc_buffers();
Steven Rostedt (VMware)e725c732017-03-03 13:37:33 -050010131}
10132
10133void __init trace_init(void)
10134{
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -040010135 trace_event_init();
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -050010136}
10137
Steven Rostedt (VMware)f3860132021-06-17 10:51:02 -040010138__init static void clear_boot_tracer(void)
Steven Rostedtb2821ae2009-02-02 21:38:32 -050010139{
10140 /*
 10141 * The default bootup tracer name lives in an init section buffer.
 10142 * This function is called at late_initcall time. If we did not
10143 * find the boot tracer, then clear it out, to prevent
10144 * later registration from accessing the buffer that is
10145 * about to be freed.
10146 */
10147 if (!default_bootup_tracer)
Steven Rostedt (VMware)f3860132021-06-17 10:51:02 -040010148 return;
Steven Rostedtb2821ae2009-02-02 21:38:32 -050010149
10150 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
10151 default_bootup_tracer);
10152 default_bootup_tracer = NULL;
Steven Rostedtb2821ae2009-02-02 21:38:32 -050010153}
10154
Chris Wilson3fd49c92018-03-30 16:01:31 +010010155#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
Steven Rostedt (VMware)f3860132021-06-17 10:51:02 -040010156__init static void tracing_set_default_clock(void)
Chris Wilson3fd49c92018-03-30 16:01:31 +010010157{
10158 /* sched_clock_stable() is determined in late_initcall */
Chris Wilson5125eee2018-04-04 22:24:50 +010010159 if (!trace_boot_clock && !sched_clock_stable()) {
Masami Ichikawabf24daa2020-01-16 22:12:36 +090010160 if (security_locked_down(LOCKDOWN_TRACEFS)) {
10161 pr_warn("Can not set tracing clock due to lockdown\n");
Steven Rostedt (VMware)f3860132021-06-17 10:51:02 -040010162 return;
Masami Ichikawabf24daa2020-01-16 22:12:36 +090010163 }
10164
Chris Wilson3fd49c92018-03-30 16:01:31 +010010165 printk(KERN_WARNING
10166 "Unstable clock detected, switching default tracing clock to \"global\"\n"
10167 "If you want to keep using the local clock, then add:\n"
10168 " \"trace_clock=local\"\n"
10169 "on the kernel command line\n");
10170 tracing_set_clock(&global_trace, "global");
10171 }
Steven Rostedt (VMware)f3860132021-06-17 10:51:02 -040010172}
10173#else
10174static inline void tracing_set_default_clock(void) { }
10175#endif
Chris Wilson3fd49c92018-03-30 16:01:31 +010010176
Steven Rostedt (VMware)f3860132021-06-17 10:51:02 -040010177__init static int late_trace_init(void)
10178{
10179 if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
10180 static_key_disable(&tracepoint_printk_key.key);
10181 tracepoint_printk = 0;
10182 }
10183
10184 tracing_set_default_clock();
10185 clear_boot_tracer();
Chris Wilson3fd49c92018-03-30 16:01:31 +010010186 return 0;
10187}
Steven Rostedt (VMware)f3860132021-06-17 10:51:02 -040010188
10189late_initcall_sync(late_trace_init);